code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
#!/usr/bin/python2.7
#
# Assignment2 Interface
#
import psycopg2
import os
import sys
import Assignment1 as a
# Donot close the connection inside this file i.e. do not perform openconnection.close()
#range__metadata = RangeRatingsMetadata
#roundR_metadata = RoundRobinRatingsMetadata
#rangetablepartition = rangeratingspart
def RangeQuery(ratingsTableName, ratingMinValue, ratingMaxValue, openconnection):
	"""Write all ratings in [ratingMinValue, ratingMaxValue] to RangeQueryOut.txt.

	Looks at both partitioning schemes built by Assignment 1:
	  * range partitions  rangeratingspart<i>  -- only partitions whose
	    metadata interval overlaps the requested range are scanned;
	  * round-robin partitions roundrobinratingspart<i> -- every partition
	    is scanned, since rows are spread by insertion order.
	Each matching row is written as ``<partition table>,<userid>,<movieid>,<rating>``.

	Both bounds must lie in [0.0, 5.0] with max >= min; otherwise a message
	is printed and nothing is written.  The connection is never closed here
	(per the assignment rules at the top of the file).
	NOTE(review): Python 2 only -- the handler uses a bare ``print E``.
	"""
	try:
		cur = openconnection.cursor()
		ratings_Min = ratingMinValue
		ratings_Max = ratingMaxValue
		# Validate the requested interval before touching any tables.
		if ((0.0<=ratings_Min <= 5.0) and (0.0<=ratings_Max<= 5.0) and (ratings_Max >=ratings_Min)):
			# One maxrating per range partition, in partition order.
			cur.execute("SELECT maxrating from rangeratingsmetadata")
			upperbound_range = cur.fetchall()
			i=0
			# Find the first partition whose upper bound reaches ratings_Min.
			# NOTE(review): if ratings_Min exceeded every maxrating this would
			# index past the list; the broad except below would mask that.
			while(1):
				if (ratings_Min > upperbound_range[i][0]):
					i = i+1
				else:
					lower_bound = i
					break
			i = 0
			# Find the first partition whose upper bound reaches ratings_Max.
			while(1):
				if (ratings_Max > upperbound_range[i][0]):
					i = i+1
				else:
					upper_bound = i
					break
			# All range partitions that can contain rows in the interval.
			range_list_table_lookup = range(lower_bound,upper_bound+1)
			file = open("RangeQueryOut.txt","w")
			for l in range_list_table_lookup:
				rows = []
				cur.execute('SELECT * from rangeratingspart' + str(l))
				rows = cur.fetchall()
				for row in rows:
					rat = row[2]  # row layout: (userid, movieid, rating)
					if (ratings_Min <= rat <= ratings_Max):
						file.write("{},{},{},{} \n".format("rangeratingspart" + str(l),row[0],row[1],row[2]))
			# Round-robin metadata: first column of the first row holds the
			# number of round-robin partitions; all of them must be scanned.
			cur.execute('SELECT * from RoundRobinRatingsMetadata')
			numberofpartitionslist = cur.fetchall()
			numberofpartitions = numberofpartitionslist[0][0]
			for l in range(numberofpartitions):
				cur.execute('SELECT * from RoundRobinRatingsPart' + str(l))
				rows = []
				rows = cur.fetchall()
				for row in rows:
					rat = row[2]
					if (ratings_Min <= rat <= ratings_Max):
						file.write("{},{},{},{} \n".format("roundrobinratingspart" + str(l),row[0],row[1],row[2]))
			file.close()
		else:
			print ("Please enter the valid values")
		cur.close()
	except Exception as E:
		# Per assignment rules the connection stays open; errors are only printed.
		print E
def PointQuery(ratingsTableName, ratingValue, openconnection):
	"""Write every rating row equal to ratingValue to PointQueryOut.txt.

	Searches both partitioning schemes built by Assignment 1:
	  * range partitions  rangeratingspart<i>  -- only the single partition
	    whose metadata interval contains ratingValue is scanned;
	  * round-robin partitions roundrobinratingspart<i> -- every partition
	    is scanned, because rows are distributed by insertion order.
	Each matching row is written as
	``<partition table>,<userid>,<movieid>,<rating>``.

	Args:
		ratingsTableName: unused; kept for the assignment's interface.
		ratingValue: rating to look up; must lie in [0.0, 5.0], otherwise a
			message is printed and nothing is written.
		openconnection: open DB connection; never closed here (assignment rule).

	Bug fixed: the original assigned ``table_suffxi = i`` (typo) and then read
	the undefined name ``table_suffix``, raising NameError on every valid call.
	"""
	cur = openconnection.cursor()
	pointvalue = ratingValue
	if 0.0 <= pointvalue <= 5.0:
		# One maxrating per range partition, in partition order.
		cur.execute('SELECT maxrating from RangeRatingsMetadata')
		Range_upper = cur.fetchall()
		# Advance past every partition whose upper bound is below the value;
		# the first partition that reaches it is the only one to scan.
		table_suffix = 0
		while pointvalue > Range_upper[table_suffix][0]:
			table_suffix = table_suffix + 1
		cur.execute('SELECT * from rangeratingspart' + str(table_suffix))
		rows = cur.fetchall()
		file1 = open("PointQueryOut.txt", "w")
		for row in rows:
			# row layout: (userid, movieid, rating)
			if row[2] == pointvalue:
				file1.write("{},{},{},{} \n".format("rangeratingspart" + str(table_suffix), row[0], row[1], row[2]))
		# Round-robin metadata: first column of the first row is the
		# number of round-robin partitions.
		cur.execute('SELECT * from RoundRobinRatingsMetadata')
		numberofpartitions = cur.fetchall()[0][0]
		for l in range(numberofpartitions):
			cur.execute('SELECT * from RoundRobinRatingsPart' + str(l))
			for row in cur.fetchall():
				if row[2] == pointvalue:
					file1.write("{},{},{},{} \n".format("roundrobinratingspart" + str(l), row[0], row[1], row[2]))
		file1.close()
	else:
		print("please enter a valid rating value")
	cur.close()
|
normal
|
{
"blob_id": "0c736bb5c88a8d7ee359e05fe12f0b77d83146c8",
"index": 3439,
"step-1": "#!/usr/bin/python2.7\n#\n# Assignment2 Interface\n#\n\nimport psycopg2\nimport os\nimport sys\nimport Assignment1 as a\n# Donot close the connection inside this file i.e. do not perform openconnection.close()\n#range__metadata = RangeRatingsMetadata\n#roundR_metadata = RoundRobinRatingsMetadata\n#rangetablepartition = rangeratingspart\ndef RangeQuery(ratingsTableName, ratingMinValue, ratingMaxValue, openconnection):\n\ttry:#Implement RangeQuery Here.\n\t\tcur = openconnection.cursor()\n\t\tratings_Min = ratingMinValue\n\t\tratings_Max = ratingMaxValue\n\t\tif ((0.0<=ratings_Min <= 5.0) and (0.0<=ratings_Max<= 5.0) and (ratings_Max >=ratings_Min)):\n\t\t\tcur.execute(\"SELECT maxrating from rangeratingsmetadata\") ### Lines to make him look at\n\t\t\tupperbound_range = cur.fetchall() #print the last column of the select function execute above\n\t\t\ti=0\n\t\t\t#print upperbound_range\n\t\t\twhile(1):\t\t\t\n\t\t\t\t#print upperbound_range[i][0]\n\t\t\t\tif (ratings_Min > upperbound_range[i][0]):\n\t\t\t\t\ti = i+1\n\t\t\t\telse:\n\t\t\t\t\tlower_bound = i\n\t\t\t\t\t#print \"the lower table index is\", lower_bound\n\t\t\t\t\tbreak\n\t\t\ti = 0\n\t\t\twhile(1):\n\t\t\t\tif (ratings_Max > upperbound_range[i][0]):\n\t\t\t\t\ti = i+1\n\t\t\t\telse:\n\t\t\t\t\tupper_bound = i\n\t\t\t\t\t#print \"the upper table index is\", upper_bound\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\trange_list_table_lookup = range(lower_bound,upper_bound+1)\n\t\t\t#print range_list_table_lookup\n\t\t\tfile = open(\"RangeQueryOut.txt\",\"w\")\n\t\t\tfor l in range_list_table_lookup:\n\t\t\t\trows = []\n\t\t\t\tcur.execute('SELECT * from rangeratingspart' + str(l)) ### Lines to make him look at\n\t\t\t\trows = cur.fetchall()\n\t\t\t\t#print rows\n\t\t\t\t ### Lines to make him look at\n\t\t\t\tfor row in rows:\n\t\t\t\t\trat = row[2]\n\t\t\t\t\tif (ratings_Min <= rat <= ratings_Max):\n\t\t\t\t\t\tfile.write(\"{},{},{},{} \\n\".format(\"rangeratingspart\" + str(l),row[0],row[1],row[2])) ### Lines 
to make him look at\n\t\t\t#file.close()\n\t\t\tcur.execute('SELECT * from RoundRobinRatingsMetadata')\n\t\t\tnumberofpartitionslist = cur.fetchall()\n\t\t\tnumberofpartitions = numberofpartitionslist[0][0]\n\t\t\tfor l in range(numberofpartitions):\n\t\t\t\tcur.execute('SELECT * from RoundRobinRatingsPart' + str(l)) ### Lines to make him look at\n\t\t\t\trows = []\n\t\t\t\trows = cur.fetchall()\n\t\t\t\t\n\t\t\t\tfor row in rows:\n\t\t\t\t\trat = row[2]\n\t\t\t\t\tif (ratings_Min <= rat <= ratings_Max):\n\t\t\t\t\t\tfile.write(\"{},{},{},{} \\n\".format(\"roundrobinratingspart\" + str(l),row[0],row[1],row[2])) ### Lines to make him look at\n\t\t\tfile.close()\n\t\telse:\n\t\t\tprint (\"Please enter the valid values\")\n\t\t\t\n\t\tcur.close()\n\texcept Exception as E:\n\t\tprint E\n\n\n\ndef PointQuery(ratingsTableName, ratingValue, openconnection):\n\t#Implement PointQuery Here.\n\t# Remove this once you are done with implementation\n\tcur = openconnection.cursor()\n\tpointvalue = ratingValue\n\tif ((0.0<=pointvalue<= 5.0)):\n\t\tcur.execute('SELECT maxrating from RangeRatingsMetadata')\n\t\tRange_upper = cur.fetchall()\n\t\ti=0\n\t\twhile(1):\n\t\t\tif (pointvalue > Range_upper[i][0]):\n\t\t\t\ti = i+1\n\t\t\telse:\n\t\t\t\ttable_suffxi = i\n\t\t\t\t#print \"the table suffix to look is\", table_suffix\n\t\t\t\tbreak\n\t\trows = []\n\t\tcur.execute('SELECT * from rangeratingspart'+str(table_suffix))\n\t\trows = cur.fetchall()\n\t\tfile1 = open(\"PointQueryOut.txt\",\"w\")\n\t\tfor row in rows:\n\t\t\trat = row[2]\n\t\t\tif (rat == pointvalue):\n\t\t\t\tfile1.write(\"{},{},{},{} \\n\".format(\"rangeratingspart\"+str(table_suffix),row[0],row[1],row[2]))\n\t\t#file1.close()\n\t\tcur.execute('SELECT * from RoundRobinRatingsMetadata')\n\t\tnumberofpartitionslist = cur.fetchall()\n\t\tnumberofpartitions = numberofpartitionslist[0][0]\n\t\tfor l in range(numberofpartitions):\n\t\t\tcur.execute('SELECT * from RoundRobinRatingsPart'+str(l))\n\t\t\trows = []\n\t\t\trows = 
cur.fetchall()\n\t\t\t#file1 = open(\"PointQueryOut.txt\",\"w\")\n\t\t\tfor row in rows:\n\t\t\t\trat = row[2]\n\t\t\t\tif (rat == pointvalue):\n\t\t\t\t\tfile1.write (\"{},{},{},{} \\n\".format(\"roundrobinratingspart\" + str(l),row[0],row[1],row[2]))\n\t\tfile1.close()\n\n\n\telse:\n\t\tprint(\"please enter a valid rating value\")\n\t\t\n\tcur.close()\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
def get_partial_matched(n):
    """Return the KMP failure table for *n*.

    pi[i] is the length of the longest proper prefix of n[:i + 1] that is
    also a suffix of it.  Written in the classic prefix-function form.
    """
    pi = [0] * len(n)
    k = 0
    for i in range(1, len(n)):
        # Shrink the candidate border until it can be extended (or is empty).
        while k > 0 and n[i] != n[k]:
            k = pi[k - 1]
        if n[i] == n[k]:
            k += 1
        pi[i] = k
    return pi
def get_common(h, n):
    """KMP-search pattern *n* inside *h*.

    Returns ``len(n) - s`` where ``s`` is the start index of the first
    occurrence of *n* in *h*, or 0 if *n* never occurs.  With *h* being a
    doubled (circular) string this is the score of the chosen alignment.
    """
    table = get_partial_matched(n)
    shift = 0
    hits = 0
    while shift + hits < len(h):
        if hits < len(n) and h[shift + hits] == n[hits]:
            hits += 1
            if hits == len(n):
                return len(n) - shift
        elif hits == 0:
            shift += 1
        else:
            # Skip ahead using the failure table instead of restarting.
            shift += hits - table[hits - 1]
            hits = table[hits - 1]
    return 0
def solution(status):
    """Sum alignment scores over adjacent pairs of *status* strings.

    Even-indexed pairs double the left string before matching, odd-indexed
    pairs double the right one -- presumably alternating rotation
    directions of neighbouring rings (domain inferred from the driver).
    """
    total = 0
    for idx in range(len(status) - 1):
        if idx % 2 == 0:
            total += get_common(2 * status[idx], status[idx + 1])
        else:
            total += get_common(2 * status[idx + 1], status[idx])
    return total
# Driver: read the number of test cases, then for each case read N followed
# by N + 1 state strings, and print that case's answer.
for _ in range(int(input())):
    case_size = int(input())
    rings = [input() for _ in range(case_size + 1)]
    print(solution(rings))
|
normal
|
{
"blob_id": "16a77c45a58e31c575511146dfceeaef0a2bc3a7",
"index": 3640,
"step-1": "<mask token>\n\n\ndef get_common(h, n):\n pi = get_partial_matched(n)\n begin = 0\n matched = 0\n while begin + matched < len(h):\n if matched < len(n) and h[begin + matched] == n[matched]:\n matched += 1\n if matched == len(n):\n return len(n) - begin\n elif matched == 0:\n begin += 1\n else:\n begin += matched - pi[matched - 1]\n matched = pi[matched - 1]\n return 0\n\n\ndef solution(status):\n n = len(status)\n ret = 0\n for i in range(n - 1):\n clockwise = i % 2 == 0\n if clockwise:\n ret += get_common(2 * status[i], status[i + 1])\n else:\n ret += get_common(2 * status[i + 1], status[i])\n return ret\n\n\n<mask token>\n",
"step-2": "def get_partial_matched(n):\n pi = [0] * len(n)\n begin = 1\n matched = 0\n while begin + matched < len(n):\n if n[begin + matched] == n[matched]:\n matched += 1\n pi[begin + matched - 1] = matched\n elif matched == 0:\n begin += 1\n else:\n begin += matched - pi[matched - 1]\n matched = pi[matched - 1]\n return pi\n\n\ndef get_common(h, n):\n pi = get_partial_matched(n)\n begin = 0\n matched = 0\n while begin + matched < len(h):\n if matched < len(n) and h[begin + matched] == n[matched]:\n matched += 1\n if matched == len(n):\n return len(n) - begin\n elif matched == 0:\n begin += 1\n else:\n begin += matched - pi[matched - 1]\n matched = pi[matched - 1]\n return 0\n\n\ndef solution(status):\n n = len(status)\n ret = 0\n for i in range(n - 1):\n clockwise = i % 2 == 0\n if clockwise:\n ret += get_common(2 * status[i], status[i + 1])\n else:\n ret += get_common(2 * status[i + 1], status[i])\n return ret\n\n\n<mask token>\n",
"step-3": "def get_partial_matched(n):\n pi = [0] * len(n)\n begin = 1\n matched = 0\n while begin + matched < len(n):\n if n[begin + matched] == n[matched]:\n matched += 1\n pi[begin + matched - 1] = matched\n elif matched == 0:\n begin += 1\n else:\n begin += matched - pi[matched - 1]\n matched = pi[matched - 1]\n return pi\n\n\ndef get_common(h, n):\n pi = get_partial_matched(n)\n begin = 0\n matched = 0\n while begin + matched < len(h):\n if matched < len(n) and h[begin + matched] == n[matched]:\n matched += 1\n if matched == len(n):\n return len(n) - begin\n elif matched == 0:\n begin += 1\n else:\n begin += matched - pi[matched - 1]\n matched = pi[matched - 1]\n return 0\n\n\ndef solution(status):\n n = len(status)\n ret = 0\n for i in range(n - 1):\n clockwise = i % 2 == 0\n if clockwise:\n ret += get_common(2 * status[i], status[i + 1])\n else:\n ret += get_common(2 * status[i + 1], status[i])\n return ret\n\n\n<mask token>\nfor _ in range(C):\n N = int(input())\n status = []\n for _ in range(N + 1):\n status.append(input())\n print(solution(status))\n",
"step-4": "def get_partial_matched(n):\n pi = [0] * len(n)\n begin = 1\n matched = 0\n while begin + matched < len(n):\n if n[begin + matched] == n[matched]:\n matched += 1\n pi[begin + matched - 1] = matched\n elif matched == 0:\n begin += 1\n else:\n begin += matched - pi[matched - 1]\n matched = pi[matched - 1]\n return pi\n\n\ndef get_common(h, n):\n pi = get_partial_matched(n)\n begin = 0\n matched = 0\n while begin + matched < len(h):\n if matched < len(n) and h[begin + matched] == n[matched]:\n matched += 1\n if matched == len(n):\n return len(n) - begin\n elif matched == 0:\n begin += 1\n else:\n begin += matched - pi[matched - 1]\n matched = pi[matched - 1]\n return 0\n\n\ndef solution(status):\n n = len(status)\n ret = 0\n for i in range(n - 1):\n clockwise = i % 2 == 0\n if clockwise:\n ret += get_common(2 * status[i], status[i + 1])\n else:\n ret += get_common(2 * status[i + 1], status[i])\n return ret\n\n\nC = int(input())\nfor _ in range(C):\n N = int(input())\n status = []\n for _ in range(N + 1):\n status.append(input())\n print(solution(status))\n",
"step-5": "def get_partial_matched(n):\n pi = [0] * len(n)\n begin = 1\n matched = 0\n while begin + matched < len(n):\n if n[begin + matched] == n[matched]:\n matched += 1\n pi[begin + matched - 1] = matched\n else:\n if matched == 0:\n begin += 1\n else:\n begin += matched - pi[matched - 1]\n matched = pi[matched - 1]\n return pi\n\n\ndef get_common(h, n):\n pi = get_partial_matched(n)\n begin = 0\n matched = 0\n while begin + matched < len(h):\n if matched < len(n) and h[begin + matched] == n[matched]:\n matched += 1\n if matched == len(n):\n return len(n) - begin\n else:\n if matched == 0:\n begin += 1\n else:\n begin += matched - pi[matched - 1]\n matched = pi[matched - 1]\n return 0\n\n\ndef solution(status):\n n = len(status)\n ret = 0\n for i in range(n - 1):\n clockwise = i % 2 == 0\n if clockwise:\n ret += get_common(2 * status[i], status[i + 1])\n else:\n ret += get_common(2 * status[i + 1], status[i])\n return ret\n\n\nC = int(input())\n\nfor _ in range(C):\n N = int(input())\n status = []\n for _ in range(N + 1):\n status.append(input())\n print(solution(status))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def read_contact():
contacts = Contact.select()
for contact in contacts:
print(contact)
print(contact.firstname + ' ' + contact.lastname + ' ' + contact.
phone + ' ' + contact.email + ' ' + contact.address)
def create_contact():
contact_firstname = input('Enter First Name: ')
contact_lastname = input('Enter Last Name: ')
contact_phone = input('Enter Phone Number: ')
contact_email = input('Enter Email: ')
contact_address = input('Enter Address: ')
newcontact = Contact(firstname=contact_firstname, lastname=
contact_lastname, phone=contact_phone, email=contact_email, address
=contact_address)
newcontact.save()
print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +
newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)
def update_contact():
update_find_by_firstname = input(
'Enter the First Name of the contact you want to update: ')
updated_info = Contact.get(Contact.firstname == update_find_by_firstname)
new_phone = input('Enter the new number: ')
updated_info.phone = new_phone
new_email = input('Enter new Email: ')
updated_info.email = new_email
new_address = input('Enter new Address: ')
updated_info.address = new_address
updated_info.save()
<|reserved_special_token_0|>
def delete_contact():
contact_name_delete = input(
'Enter First Name of the contact you want to delete: ')
contact_firstname = Contact.get(Contact.firstname == contact_name_delete)
contact_firstname.delete_instance()
class BaseModel(Model):
class Meta:
database = db
class Contact(BaseModel):
firstname = CharField()
lastname = CharField()
phone = CharField()
email = CharField()
address = CharField()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_contact():
contacts = Contact.select()
for contact in contacts:
print(contact)
print(contact.firstname + ' ' + contact.lastname + ' ' + contact.
phone + ' ' + contact.email + ' ' + contact.address)
def create_contact():
contact_firstname = input('Enter First Name: ')
contact_lastname = input('Enter Last Name: ')
contact_phone = input('Enter Phone Number: ')
contact_email = input('Enter Email: ')
contact_address = input('Enter Address: ')
newcontact = Contact(firstname=contact_firstname, lastname=
contact_lastname, phone=contact_phone, email=contact_email, address
=contact_address)
newcontact.save()
print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +
newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)
def update_contact():
update_find_by_firstname = input(
'Enter the First Name of the contact you want to update: ')
updated_info = Contact.get(Contact.firstname == update_find_by_firstname)
new_phone = input('Enter the new number: ')
updated_info.phone = new_phone
new_email = input('Enter new Email: ')
updated_info.email = new_email
new_address = input('Enter new Address: ')
updated_info.address = new_address
updated_info.save()
def find_contact():
find_contact_by_firstname = input(
'Enter First Name of the contact you want to find: ')
find_by_firstname = Contact.get(Contact.firstname ==
find_contact_by_firstname)
print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +
' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +
find_by_firstname.address)
def delete_contact():
contact_name_delete = input(
'Enter First Name of the contact you want to delete: ')
contact_firstname = Contact.get(Contact.firstname == contact_name_delete)
contact_firstname.delete_instance()
class BaseModel(Model):
class Meta:
database = db
class Contact(BaseModel):
firstname = CharField()
lastname = CharField()
phone = CharField()
email = CharField()
address = CharField()
db.connect()
db.create_tables([Contact])
if intro_question == 'Create':
create_contact()
elif intro_question == 'Read':
read_contact()
elif intro_question == 'Delete':
delete_contact()
elif intro_question == 'Find':
find_contact()
elif intro_question == 'Update':
update_contact()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db = PostgresqlDatabase('contacts', user='postgres', password='', host=
'localhost', port=5432)
intro_question = input(
'What would you like to do with Contacts? Create? Read? Find? Delete? Update? '
)
def read_contact():
contacts = Contact.select()
for contact in contacts:
print(contact)
print(contact.firstname + ' ' + contact.lastname + ' ' + contact.
phone + ' ' + contact.email + ' ' + contact.address)
def create_contact():
contact_firstname = input('Enter First Name: ')
contact_lastname = input('Enter Last Name: ')
contact_phone = input('Enter Phone Number: ')
contact_email = input('Enter Email: ')
contact_address = input('Enter Address: ')
newcontact = Contact(firstname=contact_firstname, lastname=
contact_lastname, phone=contact_phone, email=contact_email, address
=contact_address)
newcontact.save()
print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +
newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)
def update_contact():
update_find_by_firstname = input(
'Enter the First Name of the contact you want to update: ')
updated_info = Contact.get(Contact.firstname == update_find_by_firstname)
new_phone = input('Enter the new number: ')
updated_info.phone = new_phone
new_email = input('Enter new Email: ')
updated_info.email = new_email
new_address = input('Enter new Address: ')
updated_info.address = new_address
updated_info.save()
def find_contact():
find_contact_by_firstname = input(
'Enter First Name of the contact you want to find: ')
find_by_firstname = Contact.get(Contact.firstname ==
find_contact_by_firstname)
print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +
' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +
find_by_firstname.address)
def delete_contact():
contact_name_delete = input(
'Enter First Name of the contact you want to delete: ')
contact_firstname = Contact.get(Contact.firstname == contact_name_delete)
contact_firstname.delete_instance()
class BaseModel(Model):
class Meta:
database = db
class Contact(BaseModel):
firstname = CharField()
lastname = CharField()
phone = CharField()
email = CharField()
address = CharField()
db.connect()
db.create_tables([Contact])
if intro_question == 'Create':
create_contact()
elif intro_question == 'Read':
read_contact()
elif intro_question == 'Delete':
delete_contact()
elif intro_question == 'Find':
find_contact()
elif intro_question == 'Update':
update_contact()
<|reserved_special_token_1|>
from peewee import *
db = PostgresqlDatabase('contacts', user='postgres', password='', host=
'localhost', port=5432)
intro_question = input(
'What would you like to do with Contacts? Create? Read? Find? Delete? Update? '
)
def read_contact():
contacts = Contact.select()
for contact in contacts:
print(contact)
print(contact.firstname + ' ' + contact.lastname + ' ' + contact.
phone + ' ' + contact.email + ' ' + contact.address)
def create_contact():
contact_firstname = input('Enter First Name: ')
contact_lastname = input('Enter Last Name: ')
contact_phone = input('Enter Phone Number: ')
contact_email = input('Enter Email: ')
contact_address = input('Enter Address: ')
newcontact = Contact(firstname=contact_firstname, lastname=
contact_lastname, phone=contact_phone, email=contact_email, address
=contact_address)
newcontact.save()
print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +
newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)
def update_contact():
update_find_by_firstname = input(
'Enter the First Name of the contact you want to update: ')
updated_info = Contact.get(Contact.firstname == update_find_by_firstname)
new_phone = input('Enter the new number: ')
updated_info.phone = new_phone
new_email = input('Enter new Email: ')
updated_info.email = new_email
new_address = input('Enter new Address: ')
updated_info.address = new_address
updated_info.save()
def find_contact():
find_contact_by_firstname = input(
'Enter First Name of the contact you want to find: ')
find_by_firstname = Contact.get(Contact.firstname ==
find_contact_by_firstname)
print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +
' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +
find_by_firstname.address)
def delete_contact():
contact_name_delete = input(
'Enter First Name of the contact you want to delete: ')
contact_firstname = Contact.get(Contact.firstname == contact_name_delete)
contact_firstname.delete_instance()
class BaseModel(Model):
class Meta:
database = db
class Contact(BaseModel):
firstname = CharField()
lastname = CharField()
phone = CharField()
email = CharField()
address = CharField()
db.connect()
db.create_tables([Contact])
if intro_question == 'Create':
create_contact()
elif intro_question == 'Read':
read_contact()
elif intro_question == 'Delete':
delete_contact()
elif intro_question == 'Find':
find_contact()
elif intro_question == 'Update':
update_contact()
<|reserved_special_token_1|>
from peewee import *
# --- module-level configuration -------------------------------------------
# Handle to the local "contacts" PostgreSQL database (peewee).
# NOTE(review): empty password -- presumably relies on local trust/peer
# auth; confirm for any non-local deployment.
db = PostgresqlDatabase('contacts', user='postgres', password='',
    host='localhost', port=5432)
# Ask once, up front, which CRUD action to perform; the answer is
# dispatched at the bottom of the file after the models are declared.
intro_question = input("What would you like to do with Contacts? Create? Read? Find? Delete? Update? ")
def read_contact():
    """Print every stored contact: the model repr, then its fields."""
    for person in Contact.select():
        print(person)
        print(' '.join([person.firstname, person.lastname, person.phone,
                        person.email, person.address]))
def create_contact():
    """Prompt for each contact field, persist a new row, and echo it back."""
    # Keyword arguments evaluate left to right, so the prompts appear in
    # the same order as before.
    record = Contact(
        firstname=input("Enter First Name: "),
        lastname=input("Enter Last Name: "),
        phone=input("Enter Phone Number: "),
        email=input("Enter Email: "),
        address=input("Enter Address: "),
    )
    record.save()
    print(' '.join([record.firstname, record.lastname, record.phone,
                    record.email, record.address]))
def update_contact():
    """Look a contact up by first name and overwrite phone/email/address."""
    target = Contact.get(Contact.firstname ==
                         input("Enter the First Name of the contact you want to update: "))
    target.phone = input("Enter the new number: ")
    target.email = input("Enter new Email: ")
    target.address = input("Enter new Address: ")
    target.save()
def find_contact():
    """Look a contact up by first name and print all of its fields."""
    wanted = input("Enter First Name of the contact you want to find: ")
    match = Contact.get(Contact.firstname == wanted)
    print(' '.join([match.firstname, match.lastname, match.phone,
                    match.email, match.address]))
def delete_contact():
    """Delete the contact whose first name the user types in."""
    doomed = Contact.get(Contact.firstname ==
                         input("Enter First Name of the contact you want to delete: "))
    doomed.delete_instance()
class BaseModel(Model):
    """Peewee base model binding every subclass to the contacts database."""
    class Meta:
        # Shared PostgresqlDatabase handle defined at module level.
        database = db
class Contact(BaseModel):
    # One row per person; every field is free-form text (phone included).
    firstname = CharField()
    lastname = CharField()
    phone = CharField()
    email = CharField()
    address = CharField()
# Connect and make sure the schema exists before dispatching the chosen action.
db.connect()
# NOTE(review): presumably a no-op when the table already exists — confirm
# create_tables defaults to safe creation in this peewee version.
db.create_tables([Contact])

if intro_question == "Create":
    create_contact()
elif intro_question == "Read":
    read_contact()
elif intro_question == "Delete":
    delete_contact()
elif intro_question == "Find":
    find_contact()
elif intro_question == "Update":
    update_contact()
else:
    # Previously an unrecognized answer ended the script silently.
    print("Unrecognized option: " + intro_question)
|
flexible
|
{
"blob_id": "07544d1eb039da0081716aa489fc1a0a5a200145",
"index": 1072,
"step-1": "<mask token>\n\n\ndef read_contact():\n contacts = Contact.select()\n for contact in contacts:\n print(contact)\n print(contact.firstname + ' ' + contact.lastname + ' ' + contact.\n phone + ' ' + contact.email + ' ' + contact.address)\n\n\ndef create_contact():\n contact_firstname = input('Enter First Name: ')\n contact_lastname = input('Enter Last Name: ')\n contact_phone = input('Enter Phone Number: ')\n contact_email = input('Enter Email: ')\n contact_address = input('Enter Address: ')\n newcontact = Contact(firstname=contact_firstname, lastname=\n contact_lastname, phone=contact_phone, email=contact_email, address\n =contact_address)\n newcontact.save()\n print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +\n newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)\n\n\ndef update_contact():\n update_find_by_firstname = input(\n 'Enter the First Name of the contact you want to update: ')\n updated_info = Contact.get(Contact.firstname == update_find_by_firstname)\n new_phone = input('Enter the new number: ')\n updated_info.phone = new_phone\n new_email = input('Enter new Email: ')\n updated_info.email = new_email\n new_address = input('Enter new Address: ')\n updated_info.address = new_address\n updated_info.save()\n\n\n<mask token>\n\n\ndef delete_contact():\n contact_name_delete = input(\n 'Enter First Name of the contact you want to delete: ')\n contact_firstname = Contact.get(Contact.firstname == contact_name_delete)\n contact_firstname.delete_instance()\n\n\nclass BaseModel(Model):\n\n\n class Meta:\n database = db\n\n\nclass Contact(BaseModel):\n firstname = CharField()\n lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_contact():\n contacts = Contact.select()\n for contact in contacts:\n print(contact)\n print(contact.firstname + ' ' + contact.lastname + ' ' + contact.\n phone + ' ' + contact.email + ' ' + contact.address)\n\n\ndef create_contact():\n contact_firstname = input('Enter First Name: ')\n contact_lastname = input('Enter Last Name: ')\n contact_phone = input('Enter Phone Number: ')\n contact_email = input('Enter Email: ')\n contact_address = input('Enter Address: ')\n newcontact = Contact(firstname=contact_firstname, lastname=\n contact_lastname, phone=contact_phone, email=contact_email, address\n =contact_address)\n newcontact.save()\n print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +\n newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)\n\n\ndef update_contact():\n update_find_by_firstname = input(\n 'Enter the First Name of the contact you want to update: ')\n updated_info = Contact.get(Contact.firstname == update_find_by_firstname)\n new_phone = input('Enter the new number: ')\n updated_info.phone = new_phone\n new_email = input('Enter new Email: ')\n updated_info.email = new_email\n new_address = input('Enter new Address: ')\n updated_info.address = new_address\n updated_info.save()\n\n\ndef find_contact():\n find_contact_by_firstname = input(\n 'Enter First Name of the contact you want to find: ')\n find_by_firstname = Contact.get(Contact.firstname ==\n find_contact_by_firstname)\n print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +\n ' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +\n find_by_firstname.address)\n\n\ndef delete_contact():\n contact_name_delete = input(\n 'Enter First Name of the contact you want to delete: ')\n contact_firstname = Contact.get(Contact.firstname == contact_name_delete)\n contact_firstname.delete_instance()\n\n\nclass BaseModel(Model):\n\n\n class Meta:\n database = db\n\n\nclass Contact(BaseModel):\n firstname = CharField()\n 
lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\n\ndb.connect()\ndb.create_tables([Contact])\nif intro_question == 'Create':\n create_contact()\nelif intro_question == 'Read':\n read_contact()\nelif intro_question == 'Delete':\n delete_contact()\nelif intro_question == 'Find':\n find_contact()\nelif intro_question == 'Update':\n update_contact()\n",
"step-3": "<mask token>\ndb = PostgresqlDatabase('contacts', user='postgres', password='', host=\n 'localhost', port=5432)\nintro_question = input(\n 'What would you like to do with Contacts? Create? Read? Find? Delete? Update? '\n )\n\n\ndef read_contact():\n contacts = Contact.select()\n for contact in contacts:\n print(contact)\n print(contact.firstname + ' ' + contact.lastname + ' ' + contact.\n phone + ' ' + contact.email + ' ' + contact.address)\n\n\ndef create_contact():\n contact_firstname = input('Enter First Name: ')\n contact_lastname = input('Enter Last Name: ')\n contact_phone = input('Enter Phone Number: ')\n contact_email = input('Enter Email: ')\n contact_address = input('Enter Address: ')\n newcontact = Contact(firstname=contact_firstname, lastname=\n contact_lastname, phone=contact_phone, email=contact_email, address\n =contact_address)\n newcontact.save()\n print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +\n newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)\n\n\ndef update_contact():\n update_find_by_firstname = input(\n 'Enter the First Name of the contact you want to update: ')\n updated_info = Contact.get(Contact.firstname == update_find_by_firstname)\n new_phone = input('Enter the new number: ')\n updated_info.phone = new_phone\n new_email = input('Enter new Email: ')\n updated_info.email = new_email\n new_address = input('Enter new Address: ')\n updated_info.address = new_address\n updated_info.save()\n\n\ndef find_contact():\n find_contact_by_firstname = input(\n 'Enter First Name of the contact you want to find: ')\n find_by_firstname = Contact.get(Contact.firstname ==\n find_contact_by_firstname)\n print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +\n ' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +\n find_by_firstname.address)\n\n\ndef delete_contact():\n contact_name_delete = input(\n 'Enter First Name of the contact you want to delete: ')\n contact_firstname = 
Contact.get(Contact.firstname == contact_name_delete)\n contact_firstname.delete_instance()\n\n\nclass BaseModel(Model):\n\n\n class Meta:\n database = db\n\n\nclass Contact(BaseModel):\n firstname = CharField()\n lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\n\ndb.connect()\ndb.create_tables([Contact])\nif intro_question == 'Create':\n create_contact()\nelif intro_question == 'Read':\n read_contact()\nelif intro_question == 'Delete':\n delete_contact()\nelif intro_question == 'Find':\n find_contact()\nelif intro_question == 'Update':\n update_contact()\n",
"step-4": "from peewee import *\ndb = PostgresqlDatabase('contacts', user='postgres', password='', host=\n 'localhost', port=5432)\nintro_question = input(\n 'What would you like to do with Contacts? Create? Read? Find? Delete? Update? '\n )\n\n\ndef read_contact():\n contacts = Contact.select()\n for contact in contacts:\n print(contact)\n print(contact.firstname + ' ' + contact.lastname + ' ' + contact.\n phone + ' ' + contact.email + ' ' + contact.address)\n\n\ndef create_contact():\n contact_firstname = input('Enter First Name: ')\n contact_lastname = input('Enter Last Name: ')\n contact_phone = input('Enter Phone Number: ')\n contact_email = input('Enter Email: ')\n contact_address = input('Enter Address: ')\n newcontact = Contact(firstname=contact_firstname, lastname=\n contact_lastname, phone=contact_phone, email=contact_email, address\n =contact_address)\n newcontact.save()\n print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +\n newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)\n\n\ndef update_contact():\n update_find_by_firstname = input(\n 'Enter the First Name of the contact you want to update: ')\n updated_info = Contact.get(Contact.firstname == update_find_by_firstname)\n new_phone = input('Enter the new number: ')\n updated_info.phone = new_phone\n new_email = input('Enter new Email: ')\n updated_info.email = new_email\n new_address = input('Enter new Address: ')\n updated_info.address = new_address\n updated_info.save()\n\n\ndef find_contact():\n find_contact_by_firstname = input(\n 'Enter First Name of the contact you want to find: ')\n find_by_firstname = Contact.get(Contact.firstname ==\n find_contact_by_firstname)\n print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +\n ' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +\n find_by_firstname.address)\n\n\ndef delete_contact():\n contact_name_delete = input(\n 'Enter First Name of the contact you want to delete: ')\n 
contact_firstname = Contact.get(Contact.firstname == contact_name_delete)\n contact_firstname.delete_instance()\n\n\nclass BaseModel(Model):\n\n\n class Meta:\n database = db\n\n\nclass Contact(BaseModel):\n firstname = CharField()\n lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\n\ndb.connect()\ndb.create_tables([Contact])\nif intro_question == 'Create':\n create_contact()\nelif intro_question == 'Read':\n read_contact()\nelif intro_question == 'Delete':\n delete_contact()\nelif intro_question == 'Find':\n find_contact()\nelif intro_question == 'Update':\n update_contact()\n",
"step-5": "from peewee import *\n\ndb = PostgresqlDatabase('contacts', user='postgres', password='',\n host='localhost', port=5432)\n\nintro_question = input(\"What would you like to do with Contacts? Create? Read? Find? Delete? Update? \")\n\n\ndef read_contact():\n contacts = Contact.select()\n for contact in contacts:\n print(contact)\n print(contact.firstname + \" \" + contact.lastname + \" \" + contact.phone + \" \" + contact.email + \" \" + contact.address)\n\n\ndef create_contact():\n contact_firstname = input(\"Enter First Name: \")\n contact_lastname = input(\"Enter Last Name: \")\n contact_phone = input(\"Enter Phone Number: \")\n contact_email = input(\"Enter Email: \")\n contact_address = input(\"Enter Address: \")\n newcontact = Contact(firstname = contact_firstname, lastname = contact_lastname, phone = contact_phone, email = contact_email, address = contact_address)\n newcontact.save()\n print(newcontact.firstname + \" \" + newcontact.lastname + \" \" + newcontact.phone + \" \" + newcontact.email + \" \" + newcontact.address)\n\ndef update_contact():\n update_find_by_firstname = input(\"Enter the First Name of the contact you want to update: \")\n updated_info = Contact.get(Contact.firstname == update_find_by_firstname)\n new_phone = input(\"Enter the new number: \")\n updated_info.phone = new_phone\n new_email = input(\"Enter new Email: \")\n updated_info.email = new_email\n new_address = input(\"Enter new Address: \")\n updated_info.address = new_address\n updated_info.save() \n\n\ndef find_contact():\n find_contact_by_firstname = input(\"Enter First Name of the contact you want to find: \")\n find_by_firstname = Contact.get(Contact.firstname == find_contact_by_firstname)\n print(find_by_firstname.firstname + \" \" + find_by_firstname.lastname + \" \" + find_by_firstname.phone + \" \" + find_by_firstname.email + \" \" + find_by_firstname.address)\n\ndef delete_contact():\n contact_name_delete = input(\"Enter First Name of the contact you want to 
delete: \")\n contact_firstname = Contact.get(Contact.firstname == contact_name_delete)\n contact_firstname.delete_instance()\n\nclass BaseModel(Model):\n class Meta:\n database = db\n\nclass Contact(BaseModel):\n firstname = CharField()\n lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\ndb.connect()\ndb.create_tables([Contact])\n\nif intro_question == \"Create\":\n create_contact()\n\n\nelif intro_question == \"Read\":\n read_contact()\n\n\nelif intro_question == \"Delete\":\n delete_contact()\n\nelif intro_question == \"Find\":\n find_contact()\n\nelif intro_question == \"Update\":\n update_contact()",
"step-ids": [
7,
9,
10,
11,
12
]
}
|
[
7,
9,
10,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
<|reserved_special_token_0|>
_sym_db.RegisterMessage(GetUserInterestRequest)
<|reserved_special_token_0|>
_sym_db.RegisterServiceDescriptor(_USERINTERESTSERVICE)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')
)
<|reserved_special_token_0|>
_sym_db = _symbol_database.Default()
<|reserved_special_token_0|>
DESCRIPTOR = _descriptor.FileDescriptor(name=
'google/ads/googleads_v1/proto/services/user_interest_service.proto',
package='google.ads.googleads.v1.services', syntax='proto3',
serialized_options=_b(
"""
$com.google.ads.googleads.v1.servicesBUserInterestServiceProtoPZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services¢GAAª Google.Ads.GoogleAds.V1.ServicesÊ Google\\Ads\\GoogleAds\\V1\\Servicesê$Google::Ads::GoogleAds::V1::Services"""
), serialized_pb=_b(
'\nBgoogle/ads/googleads_v1/proto/services/user_interest_service.proto\x12 google.ads.googleads.v1.services\x1a;google/ads/googleads_v1/proto/resources/user_interest.proto\x1a\x1cgoogle/api/annotations.proto"/\n\x16GetUserInterestRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t2Í\x01\n\x13UserInterestService\x12µ\x01\n\x0fGetUserInterest\x128.google.ads.googleads.v1.services.GetUserInterestRequest\x1a/.google.ads.googleads.v1.resources.UserInterest"7\x82Óä\x93\x021\x12//v1/{resource_name=customers/*/userInterests/*}Bÿ\x01\n$com.google.ads.googleads.v1.servicesB\x18UserInterestServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services¢\x02\x03GAAª\x02 Google.Ads.GoogleAds.V1.ServicesÊ\x02 Google\\Ads\\GoogleAds\\V1\\Servicesê\x02$Google::Ads::GoogleAds::V1::Servicesb\x06proto3'
), dependencies=[
google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2
.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR])
_GETUSERINTERESTREQUEST = _descriptor.Descriptor(name=
'GetUserInterestRequest', full_name=
'google.ads.googleads.v1.services.GetUserInterestRequest', filename=
None, file=DESCRIPTOR, containing_type=None, fields=[_descriptor.
FieldDescriptor(name='resource_name', full_name=
'google.ads.googleads.v1.services.GetUserInterestRequest.resource_name',
index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False,
default_value=_b('').decode('utf-8'), message_type=None, enum_type=None,
containing_type=None, is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR)], extensions=[], nested_types
=[], enum_types=[], serialized_options=None, is_extendable=False,
syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=195,
serialized_end=242)
DESCRIPTOR.message_types_by_name['GetUserInterestRequest'
] = _GETUSERINTERESTREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetUserInterestRequest = _reflection.GeneratedProtocolMessageType(
'GetUserInterestRequest', (_message.Message,), dict(DESCRIPTOR=
_GETUSERINTERESTREQUEST, __module__=
'google.ads.googleads_v1.proto.services.user_interest_service_pb2',
__doc__=
"""Request message for
[UserInterestService.GetUserInterest][google.ads.googleads.v1.services.UserInterestService.GetUserInterest].
Attributes:
resource_name:
Resource name of the UserInterest to fetch.
"""
))
_sym_db.RegisterMessage(GetUserInterestRequest)
DESCRIPTOR._options = None
_USERINTERESTSERVICE = _descriptor.ServiceDescriptor(name=
'UserInterestService', full_name=
'google.ads.googleads.v1.services.UserInterestService', file=DESCRIPTOR,
index=0, serialized_options=None, serialized_start=245, serialized_end=
450, methods=[_descriptor.MethodDescriptor(name='GetUserInterest',
full_name=
'google.ads.googleads.v1.services.UserInterestService.GetUserInterest',
index=0, containing_service=None, input_type=_GETUSERINTERESTREQUEST,
output_type=
google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2
._USERINTEREST, serialized_options=_b(
'\x82Óä\x93\x021\x12//v1/{resource_name=customers/*/userInterests/*}'))])
_sym_db.RegisterServiceDescriptor(_USERINTERESTSERVICE)
DESCRIPTOR.services_by_name['UserInterestService'] = _USERINTERESTSERVICE
<|reserved_special_token_1|>
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')
)
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v1.proto.resources import user_interest_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(name=
'google/ads/googleads_v1/proto/services/user_interest_service.proto',
package='google.ads.googleads.v1.services', syntax='proto3',
serialized_options=_b(
"""
$com.google.ads.googleads.v1.servicesBUserInterestServiceProtoPZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services¢GAAª Google.Ads.GoogleAds.V1.ServicesÊ Google\\Ads\\GoogleAds\\V1\\Servicesê$Google::Ads::GoogleAds::V1::Services"""
), serialized_pb=_b(
'\nBgoogle/ads/googleads_v1/proto/services/user_interest_service.proto\x12 google.ads.googleads.v1.services\x1a;google/ads/googleads_v1/proto/resources/user_interest.proto\x1a\x1cgoogle/api/annotations.proto"/\n\x16GetUserInterestRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t2Í\x01\n\x13UserInterestService\x12µ\x01\n\x0fGetUserInterest\x128.google.ads.googleads.v1.services.GetUserInterestRequest\x1a/.google.ads.googleads.v1.resources.UserInterest"7\x82Óä\x93\x021\x12//v1/{resource_name=customers/*/userInterests/*}Bÿ\x01\n$com.google.ads.googleads.v1.servicesB\x18UserInterestServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services¢\x02\x03GAAª\x02 Google.Ads.GoogleAds.V1.ServicesÊ\x02 Google\\Ads\\GoogleAds\\V1\\Servicesê\x02$Google::Ads::GoogleAds::V1::Servicesb\x06proto3'
), dependencies=[
google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2
.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR])
_GETUSERINTERESTREQUEST = _descriptor.Descriptor(name=
'GetUserInterestRequest', full_name=
'google.ads.googleads.v1.services.GetUserInterestRequest', filename=
None, file=DESCRIPTOR, containing_type=None, fields=[_descriptor.
FieldDescriptor(name='resource_name', full_name=
'google.ads.googleads.v1.services.GetUserInterestRequest.resource_name',
index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False,
default_value=_b('').decode('utf-8'), message_type=None, enum_type=None,
containing_type=None, is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR)], extensions=[], nested_types
=[], enum_types=[], serialized_options=None, is_extendable=False,
syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=195,
serialized_end=242)
DESCRIPTOR.message_types_by_name['GetUserInterestRequest'
] = _GETUSERINTERESTREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetUserInterestRequest = _reflection.GeneratedProtocolMessageType(
'GetUserInterestRequest', (_message.Message,), dict(DESCRIPTOR=
_GETUSERINTERESTREQUEST, __module__=
'google.ads.googleads_v1.proto.services.user_interest_service_pb2',
__doc__=
"""Request message for
[UserInterestService.GetUserInterest][google.ads.googleads.v1.services.UserInterestService.GetUserInterest].
Attributes:
resource_name:
Resource name of the UserInterest to fetch.
"""
))
_sym_db.RegisterMessage(GetUserInterestRequest)
DESCRIPTOR._options = None
_USERINTERESTSERVICE = _descriptor.ServiceDescriptor(name=
'UserInterestService', full_name=
'google.ads.googleads.v1.services.UserInterestService', file=DESCRIPTOR,
index=0, serialized_options=None, serialized_start=245, serialized_end=
450, methods=[_descriptor.MethodDescriptor(name='GetUserInterest',
full_name=
'google.ads.googleads.v1.services.UserInterestService.GetUserInterest',
index=0, containing_service=None, input_type=_GETUSERINTERESTREQUEST,
output_type=
google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2
._USERINTEREST, serialized_options=_b(
'\x82Óä\x93\x021\x12//v1/{resource_name=customers/*/userInterests/*}'))])
_sym_db.RegisterServiceDescriptor(_USERINTERESTSERVICE)
DESCRIPTOR.services_by_name['UserInterestService'] = _USERINTERESTSERVICE
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v1/proto/services/user_interest_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v1.proto.resources import user_interest_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v1/proto/services/user_interest_service.proto',
package='google.ads.googleads.v1.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v1.servicesB\030UserInterestServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V1.Services\312\002 Google\\Ads\\GoogleAds\\V1\\Services\352\002$Google::Ads::GoogleAds::V1::Services'),
serialized_pb=_b('\nBgoogle/ads/googleads_v1/proto/services/user_interest_service.proto\x12 google.ads.googleads.v1.services\x1a;google/ads/googleads_v1/proto/resources/user_interest.proto\x1a\x1cgoogle/api/annotations.proto\"/\n\x16GetUserInterestRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xcd\x01\n\x13UserInterestService\x12\xb5\x01\n\x0fGetUserInterest\x12\x38.google.ads.googleads.v1.services.GetUserInterestRequest\x1a/.google.ads.googleads.v1.resources.UserInterest\"7\x82\xd3\xe4\x93\x02\x31\x12//v1/{resource_name=customers/*/userInterests/*}B\xff\x01\n$com.google.ads.googleads.v1.servicesB\x18UserInterestServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V1.Services\xca\x02 Google\\Ads\\GoogleAds\\V1\\Services\xea\x02$Google::Ads::GoogleAds::V1::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
# Message descriptor for GetUserInterestRequest: a single proto3 string field
# `resource_name` (field number 1, type 9 = TYPE_STRING).
# Generated code — regeneration from the .proto overwrites any hand edits.
_GETUSERINTERESTREQUEST = _descriptor.Descriptor(
  name='GetUserInterestRequest',
  full_name='google.ads.googleads.v1.services.GetUserInterestRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='resource_name', full_name='google.ads.googleads.v1.services.GetUserInterestRequest.resource_name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # byte offsets into the serialized file descriptor defined above
  serialized_start=195,
  serialized_end=242,
)
DESCRIPTOR.message_types_by_name['GetUserInterestRequest'] = _GETUSERINTERESTREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetUserInterestRequest = _reflection.GeneratedProtocolMessageType('GetUserInterestRequest', (_message.Message,), dict(
DESCRIPTOR = _GETUSERINTERESTREQUEST,
__module__ = 'google.ads.googleads_v1.proto.services.user_interest_service_pb2'
,
__doc__ = """Request message for
[UserInterestService.GetUserInterest][google.ads.googleads.v1.services.UserInterestService.GetUserInterest].
Attributes:
resource_name:
Resource name of the UserInterest to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.GetUserInterestRequest)
))
_sym_db.RegisterMessage(GetUserInterestRequest)
DESCRIPTOR._options = None
# Service descriptor for UserInterestService with its single GetUserInterest
# RPC.  The serialized_options bytes on the method carry the google.api.http
# annotation mapping it to GET /v1/{resource_name=customers/*/userInterests/*}.
# Generated code — regeneration from the .proto overwrites any hand edits.
_USERINTERESTSERVICE = _descriptor.ServiceDescriptor(
  name='UserInterestService',
  full_name='google.ads.googleads.v1.services.UserInterestService',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  serialized_start=245,
  serialized_end=450,
  methods=[
  _descriptor.MethodDescriptor(
    name='GetUserInterest',
    full_name='google.ads.googleads.v1.services.UserInterestService.GetUserInterest',
    index=0,
    containing_service=None,
    input_type=_GETUSERINTERESTREQUEST,
    output_type=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2._USERINTEREST,
    serialized_options=_b('\202\323\344\223\0021\022//v1/{resource_name=customers/*/userInterests/*}'),
  ),
])
_sym_db.RegisterServiceDescriptor(_USERINTERESTSERVICE)
DESCRIPTOR.services_by_name['UserInterestService'] = _USERINTERESTSERVICE
# @@protoc_insertion_point(module_scope)
|
flexible
|
{
"blob_id": "654586443e96f84aae70b3ce3263b0458a27334b",
"index": 473,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n<mask token>\n_sym_db.RegisterMessage(GetUserInterestRequest)\n<mask token>\n_sym_db.RegisterServiceDescriptor(_USERINTERESTSERVICE)\n<mask token>\n",
"step-3": "<mask token>\n_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')\n )\n<mask token>\n_sym_db = _symbol_database.Default()\n<mask token>\nDESCRIPTOR = _descriptor.FileDescriptor(name=\n 'google/ads/googleads_v1/proto/services/user_interest_service.proto',\n package='google.ads.googleads.v1.services', syntax='proto3',\n serialized_options=_b(\n \"\"\"\n$com.google.ads.googleads.v1.servicesB\u0018UserInterestServiceProtoP\u0001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services¢\u0002\u0003GAAª\u0002 Google.Ads.GoogleAds.V1.ServicesÊ\u0002 Google\\\\Ads\\\\GoogleAds\\\\V1\\\\Servicesê\u0002$Google::Ads::GoogleAds::V1::Services\"\"\"\n ), serialized_pb=_b(\n '\\nBgoogle/ads/googleads_v1/proto/services/user_interest_service.proto\\x12 google.ads.googleads.v1.services\\x1a;google/ads/googleads_v1/proto/resources/user_interest.proto\\x1a\\x1cgoogle/api/annotations.proto\"/\\n\\x16GetUserInterestRequest\\x12\\x15\\n\\rresource_name\\x18\\x01 \\x01(\\t2Í\\x01\\n\\x13UserInterestService\\x12µ\\x01\\n\\x0fGetUserInterest\\x128.google.ads.googleads.v1.services.GetUserInterestRequest\\x1a/.google.ads.googleads.v1.resources.UserInterest\"7\\x82Óä\\x93\\x021\\x12//v1/{resource_name=customers/*/userInterests/*}Bÿ\\x01\\n$com.google.ads.googleads.v1.servicesB\\x18UserInterestServiceProtoP\\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services¢\\x02\\x03GAAª\\x02 Google.Ads.GoogleAds.V1.ServicesÊ\\x02 Google\\\\Ads\\\\GoogleAds\\\\V1\\\\Servicesê\\x02$Google::Ads::GoogleAds::V1::Servicesb\\x06proto3'\n ), dependencies=[\n google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2\n .DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR])\n_GETUSERINTERESTREQUEST = _descriptor.Descriptor(name=\n 'GetUserInterestRequest', full_name=\n 'google.ads.googleads.v1.services.GetUserInterestRequest', filename=\n None, file=DESCRIPTOR, containing_type=None, fields=[_descriptor.\n 
FieldDescriptor(name='resource_name', full_name=\n 'google.ads.googleads.v1.services.GetUserInterestRequest.resource_name',\n index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False,\n default_value=_b('').decode('utf-8'), message_type=None, enum_type=None,\n containing_type=None, is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR)], extensions=[], nested_types\n =[], enum_types=[], serialized_options=None, is_extendable=False,\n syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=195,\n serialized_end=242)\nDESCRIPTOR.message_types_by_name['GetUserInterestRequest'\n ] = _GETUSERINTERESTREQUEST\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\nGetUserInterestRequest = _reflection.GeneratedProtocolMessageType(\n 'GetUserInterestRequest', (_message.Message,), dict(DESCRIPTOR=\n _GETUSERINTERESTREQUEST, __module__=\n 'google.ads.googleads_v1.proto.services.user_interest_service_pb2',\n __doc__=\n \"\"\"Request message for\n [UserInterestService.GetUserInterest][google.ads.googleads.v1.services.UserInterestService.GetUserInterest].\n \n \n Attributes:\n resource_name:\n Resource name of the UserInterest to fetch.\n \"\"\"\n ))\n_sym_db.RegisterMessage(GetUserInterestRequest)\nDESCRIPTOR._options = None\n_USERINTERESTSERVICE = _descriptor.ServiceDescriptor(name=\n 'UserInterestService', full_name=\n 'google.ads.googleads.v1.services.UserInterestService', file=DESCRIPTOR,\n index=0, serialized_options=None, serialized_start=245, serialized_end=\n 450, methods=[_descriptor.MethodDescriptor(name='GetUserInterest',\n full_name=\n 'google.ads.googleads.v1.services.UserInterestService.GetUserInterest',\n index=0, containing_service=None, input_type=_GETUSERINTERESTREQUEST,\n output_type=\n google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2\n ._USERINTEREST, serialized_options=_b(\n 
'\\x82Óä\\x93\\x021\\x12//v1/{resource_name=customers/*/userInterests/*}'))])\n_sym_db.RegisterServiceDescriptor(_USERINTERESTSERVICE)\nDESCRIPTOR.services_by_name['UserInterestService'] = _USERINTERESTSERVICE\n",
"step-4": "import sys\n_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')\n )\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n_sym_db = _symbol_database.Default()\nfrom google.ads.google_ads.v1.proto.resources import user_interest_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2\nfrom google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2\nDESCRIPTOR = _descriptor.FileDescriptor(name=\n 'google/ads/googleads_v1/proto/services/user_interest_service.proto',\n package='google.ads.googleads.v1.services', syntax='proto3',\n serialized_options=_b(\n \"\"\"\n$com.google.ads.googleads.v1.servicesB\u0018UserInterestServiceProtoP\u0001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services¢\u0002\u0003GAAª\u0002 Google.Ads.GoogleAds.V1.ServicesÊ\u0002 Google\\\\Ads\\\\GoogleAds\\\\V1\\\\Servicesê\u0002$Google::Ads::GoogleAds::V1::Services\"\"\"\n ), serialized_pb=_b(\n '\\nBgoogle/ads/googleads_v1/proto/services/user_interest_service.proto\\x12 google.ads.googleads.v1.services\\x1a;google/ads/googleads_v1/proto/resources/user_interest.proto\\x1a\\x1cgoogle/api/annotations.proto\"/\\n\\x16GetUserInterestRequest\\x12\\x15\\n\\rresource_name\\x18\\x01 \\x01(\\t2Í\\x01\\n\\x13UserInterestService\\x12µ\\x01\\n\\x0fGetUserInterest\\x128.google.ads.googleads.v1.services.GetUserInterestRequest\\x1a/.google.ads.googleads.v1.resources.UserInterest\"7\\x82Óä\\x93\\x021\\x12//v1/{resource_name=customers/*/userInterests/*}Bÿ\\x01\\n$com.google.ads.googleads.v1.servicesB\\x18UserInterestServiceProtoP\\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services¢\\x02\\x03GAAª\\x02 Google.Ads.GoogleAds.V1.ServicesÊ\\x02 
Google\\\\Ads\\\\GoogleAds\\\\V1\\\\Servicesê\\x02$Google::Ads::GoogleAds::V1::Servicesb\\x06proto3'\n ), dependencies=[\n google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2\n .DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR])\n_GETUSERINTERESTREQUEST = _descriptor.Descriptor(name=\n 'GetUserInterestRequest', full_name=\n 'google.ads.googleads.v1.services.GetUserInterestRequest', filename=\n None, file=DESCRIPTOR, containing_type=None, fields=[_descriptor.\n FieldDescriptor(name='resource_name', full_name=\n 'google.ads.googleads.v1.services.GetUserInterestRequest.resource_name',\n index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False,\n default_value=_b('').decode('utf-8'), message_type=None, enum_type=None,\n containing_type=None, is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR)], extensions=[], nested_types\n =[], enum_types=[], serialized_options=None, is_extendable=False,\n syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=195,\n serialized_end=242)\nDESCRIPTOR.message_types_by_name['GetUserInterestRequest'\n ] = _GETUSERINTERESTREQUEST\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\nGetUserInterestRequest = _reflection.GeneratedProtocolMessageType(\n 'GetUserInterestRequest', (_message.Message,), dict(DESCRIPTOR=\n _GETUSERINTERESTREQUEST, __module__=\n 'google.ads.googleads_v1.proto.services.user_interest_service_pb2',\n __doc__=\n \"\"\"Request message for\n [UserInterestService.GetUserInterest][google.ads.googleads.v1.services.UserInterestService.GetUserInterest].\n \n \n Attributes:\n resource_name:\n Resource name of the UserInterest to fetch.\n \"\"\"\n ))\n_sym_db.RegisterMessage(GetUserInterestRequest)\nDESCRIPTOR._options = None\n_USERINTERESTSERVICE = _descriptor.ServiceDescriptor(name=\n 'UserInterestService', full_name=\n 'google.ads.googleads.v1.services.UserInterestService', file=DESCRIPTOR,\n index=0, serialized_options=None, 
serialized_start=245, serialized_end=\n 450, methods=[_descriptor.MethodDescriptor(name='GetUserInterest',\n full_name=\n 'google.ads.googleads.v1.services.UserInterestService.GetUserInterest',\n index=0, containing_service=None, input_type=_GETUSERINTERESTREQUEST,\n output_type=\n google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2\n ._USERINTEREST, serialized_options=_b(\n '\\x82Óä\\x93\\x021\\x12//v1/{resource_name=customers/*/userInterests/*}'))])\n_sym_db.RegisterServiceDescriptor(_USERINTERESTSERVICE)\nDESCRIPTOR.services_by_name['UserInterestService'] = _USERINTERESTSERVICE\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by the protocol buffer compiler. DO NOT EDIT!\n# source: google/ads/googleads_v1/proto/services/user_interest_service.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\nfrom google.ads.google_ads.v1.proto.resources import user_interest_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2\nfrom google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='google/ads/googleads_v1/proto/services/user_interest_service.proto',\n package='google.ads.googleads.v1.services',\n syntax='proto3',\n serialized_options=_b('\\n$com.google.ads.googleads.v1.servicesB\\030UserInterestServiceProtoP\\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\\242\\002\\003GAA\\252\\002 Google.Ads.GoogleAds.V1.Services\\312\\002 Google\\\\Ads\\\\GoogleAds\\\\V1\\\\Services\\352\\002$Google::Ads::GoogleAds::V1::Services'),\n serialized_pb=_b('\\nBgoogle/ads/googleads_v1/proto/services/user_interest_service.proto\\x12 google.ads.googleads.v1.services\\x1a;google/ads/googleads_v1/proto/resources/user_interest.proto\\x1a\\x1cgoogle/api/annotations.proto\\\"/\\n\\x16GetUserInterestRequest\\x12\\x15\\n\\rresource_name\\x18\\x01 
\\x01(\\t2\\xcd\\x01\\n\\x13UserInterestService\\x12\\xb5\\x01\\n\\x0fGetUserInterest\\x12\\x38.google.ads.googleads.v1.services.GetUserInterestRequest\\x1a/.google.ads.googleads.v1.resources.UserInterest\\\"7\\x82\\xd3\\xe4\\x93\\x02\\x31\\x12//v1/{resource_name=customers/*/userInterests/*}B\\xff\\x01\\n$com.google.ads.googleads.v1.servicesB\\x18UserInterestServiceProtoP\\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\\xa2\\x02\\x03GAA\\xaa\\x02 Google.Ads.GoogleAds.V1.Services\\xca\\x02 Google\\\\Ads\\\\GoogleAds\\\\V1\\\\Services\\xea\\x02$Google::Ads::GoogleAds::V1::Servicesb\\x06proto3')\n ,\n dependencies=[google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])\n\n\n\n\n_GETUSERINTERESTREQUEST = _descriptor.Descriptor(\n name='GetUserInterestRequest',\n full_name='google.ads.googleads.v1.services.GetUserInterestRequest',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='resource_name', full_name='google.ads.googleads.v1.services.GetUserInterestRequest.resource_name', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n serialized_options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=195,\n serialized_end=242,\n)\n\nDESCRIPTOR.message_types_by_name['GetUserInterestRequest'] = _GETUSERINTERESTREQUEST\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nGetUserInterestRequest = _reflection.GeneratedProtocolMessageType('GetUserInterestRequest', (_message.Message,), dict(\n DESCRIPTOR = _GETUSERINTERESTREQUEST,\n __module__ = 
'google.ads.googleads_v1.proto.services.user_interest_service_pb2'\n ,\n __doc__ = \"\"\"Request message for\n [UserInterestService.GetUserInterest][google.ads.googleads.v1.services.UserInterestService.GetUserInterest].\n \n \n Attributes:\n resource_name:\n Resource name of the UserInterest to fetch.\n \"\"\",\n # @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.GetUserInterestRequest)\n ))\n_sym_db.RegisterMessage(GetUserInterestRequest)\n\n\nDESCRIPTOR._options = None\n\n_USERINTERESTSERVICE = _descriptor.ServiceDescriptor(\n name='UserInterestService',\n full_name='google.ads.googleads.v1.services.UserInterestService',\n file=DESCRIPTOR,\n index=0,\n serialized_options=None,\n serialized_start=245,\n serialized_end=450,\n methods=[\n _descriptor.MethodDescriptor(\n name='GetUserInterest',\n full_name='google.ads.googleads.v1.services.UserInterestService.GetUserInterest',\n index=0,\n containing_service=None,\n input_type=_GETUSERINTERESTREQUEST,\n output_type=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2._USERINTEREST,\n serialized_options=_b('\\202\\323\\344\\223\\0021\\022//v1/{resource_name=customers/*/userInterests/*}'),\n ),\n])\n_sym_db.RegisterServiceDescriptor(_USERINTERESTSERVICE)\n\nDESCRIPTOR.services_by_name['UserInterestService'] = _USERINTERESTSERVICE\n\n# @@protoc_insertion_point(module_scope)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run() ->None:
os.environ['TZ'] = 'Europe/Brussels'
if sys.platform != 'win32':
from time import tzset
tzset()
print(datetime.now())
load_dotenv()
Log.setup()
token = os.getenv('DISCORD_BOT_TOKEN')
assert token, 'Could not find any dokbot bot token'
intents = discord.Intents.default()
intents.members = True
prefix = '>' if os.getenv('APP_ENV') == 'development' else '!'
bot = DokBot(command_prefix=prefix, intents=intents)
bot.add_cog(DokBotCog(bot))
bot.add_cog(EventCog(bot))
@bot.event
async def on_ready():
logging.getLogger().info(f'{bot.user.name} has connected.')
try:
bot.run(token)
except InvalidStatusCode as e:
error_message = f'Could not start client {e}\n{traceback.format_exc()}'
Log.error(error_message)
<|reserved_special_token_1|>
from websockets.exceptions import InvalidStatusCode
from dokbot.DokBotCog import DokBotCog
from events.EventCog import EventCog
from dotenv import load_dotenv
from datetime import datetime
from .DokBot import DokBot
import utils.Logger as Log
import logging
import os
import sys
import traceback
import discord
def run() ->None:
os.environ['TZ'] = 'Europe/Brussels'
if sys.platform != 'win32':
from time import tzset
tzset()
print(datetime.now())
load_dotenv()
Log.setup()
token = os.getenv('DISCORD_BOT_TOKEN')
assert token, 'Could not find any dokbot bot token'
intents = discord.Intents.default()
intents.members = True
prefix = '>' if os.getenv('APP_ENV') == 'development' else '!'
bot = DokBot(command_prefix=prefix, intents=intents)
bot.add_cog(DokBotCog(bot))
bot.add_cog(EventCog(bot))
@bot.event
async def on_ready():
logging.getLogger().info(f'{bot.user.name} has connected.')
try:
bot.run(token)
except InvalidStatusCode as e:
error_message = f'Could not start client {e}\n{traceback.format_exc()}'
Log.error(error_message)
<|reserved_special_token_1|>
# BotSetup.py
from websockets.exceptions import InvalidStatusCode
from dokbot.DokBotCog import DokBotCog
from events.EventCog import EventCog
from dotenv import load_dotenv
from datetime import datetime
from .DokBot import DokBot
import utils.Logger as Log
import logging
import os
import sys
import traceback
import discord
def run() -> None:
os.environ['TZ'] = 'Europe/Brussels'
if sys.platform != 'win32':
from time import tzset
tzset()
print(datetime.now())
load_dotenv()
Log.setup()
token = os.getenv('DISCORD_BOT_TOKEN')
assert token, "Could not find any dokbot bot token"
intents = discord.Intents.default()
intents.members = True
prefix = '>' if os.getenv('APP_ENV') == 'development' else '!'
bot = DokBot(command_prefix=prefix, intents=intents)
bot.add_cog(DokBotCog(bot))
bot.add_cog(EventCog(bot))
@bot.event
async def on_ready():
logging.getLogger().info(f'{bot.user.name} has connected.')
#
# @discord_client.event
# async def on_message(message: discord.Message) -> None:
# if not discord_client.is_ready() or message.author == discord_client.user:
# return
# try:
# await command_runner.run_command_for_message(message)
# except Exception as ex:
# await handle_exception(ex, author=message.author, content=message.content)
#
# @discord_client.event
# async def on_raw_reaction_add(reaction_event: discord.RawReactionActionEvent) -> None:
# if not discord_client.is_ready() or reaction_event.user_id == discord_client.user.id or reaction_event.emoji.name not in EMOJI_SIGNUP_STATUS.keys():
# return
# try:
# await signup_character(client=discord_client, reaction_event=reaction_event)
# except Exception as ex:
# user = await discord_client.fetch_user(reaction_event.user_id)
# await handle_exception(ex, author=user, content="Raid signup failed")
#
# async def handle_exception(ex: Exception, author: discord.User, content: str) -> None:
# Log.error(f"{author}, {content}, {ex}\n{traceback.format_exc()}")
# if isinstance(ex, BotException) and not isinstance(ex, InternalBotException):
# await author.send(ex.message)
# else:
# global maintainer
# if maintainer is None:
# maintainer = await discord_client.fetch_user(MAINTAINER_ID)
# await author.send(f"There were internal difficulties. Sending a message to {maintainer.display_name}")
# await maintainer.send(f'{author.display_name}, {content}, {ex}')
#
try:
bot.run(token)
except InvalidStatusCode as e:
error_message = f"Could not start client {e}\n{traceback.format_exc()}"
Log.error(error_message)
|
flexible
|
{
"blob_id": "a7123fa221555b15162dbab0d93a86965190b805",
"index": 4141,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run() ->None:\n os.environ['TZ'] = 'Europe/Brussels'\n if sys.platform != 'win32':\n from time import tzset\n tzset()\n print(datetime.now())\n load_dotenv()\n Log.setup()\n token = os.getenv('DISCORD_BOT_TOKEN')\n assert token, 'Could not find any dokbot bot token'\n intents = discord.Intents.default()\n intents.members = True\n prefix = '>' if os.getenv('APP_ENV') == 'development' else '!'\n bot = DokBot(command_prefix=prefix, intents=intents)\n bot.add_cog(DokBotCog(bot))\n bot.add_cog(EventCog(bot))\n\n @bot.event\n async def on_ready():\n logging.getLogger().info(f'{bot.user.name} has connected.')\n try:\n bot.run(token)\n except InvalidStatusCode as e:\n error_message = f'Could not start client {e}\\n{traceback.format_exc()}'\n Log.error(error_message)\n",
"step-3": "from websockets.exceptions import InvalidStatusCode\nfrom dokbot.DokBotCog import DokBotCog\nfrom events.EventCog import EventCog\nfrom dotenv import load_dotenv\nfrom datetime import datetime\nfrom .DokBot import DokBot\nimport utils.Logger as Log\nimport logging\nimport os\nimport sys\nimport traceback\nimport discord\n\n\ndef run() ->None:\n os.environ['TZ'] = 'Europe/Brussels'\n if sys.platform != 'win32':\n from time import tzset\n tzset()\n print(datetime.now())\n load_dotenv()\n Log.setup()\n token = os.getenv('DISCORD_BOT_TOKEN')\n assert token, 'Could not find any dokbot bot token'\n intents = discord.Intents.default()\n intents.members = True\n prefix = '>' if os.getenv('APP_ENV') == 'development' else '!'\n bot = DokBot(command_prefix=prefix, intents=intents)\n bot.add_cog(DokBotCog(bot))\n bot.add_cog(EventCog(bot))\n\n @bot.event\n async def on_ready():\n logging.getLogger().info(f'{bot.user.name} has connected.')\n try:\n bot.run(token)\n except InvalidStatusCode as e:\n error_message = f'Could not start client {e}\\n{traceback.format_exc()}'\n Log.error(error_message)\n",
"step-4": "# BotSetup.py\nfrom websockets.exceptions import InvalidStatusCode\nfrom dokbot.DokBotCog import DokBotCog\nfrom events.EventCog import EventCog\nfrom dotenv import load_dotenv\nfrom datetime import datetime\nfrom .DokBot import DokBot\n\nimport utils.Logger as Log\nimport logging\nimport os\nimport sys\nimport traceback\nimport discord\n\n\ndef run() -> None:\n os.environ['TZ'] = 'Europe/Brussels'\n if sys.platform != 'win32':\n from time import tzset\n tzset()\n\n print(datetime.now())\n load_dotenv()\n Log.setup()\n\n token = os.getenv('DISCORD_BOT_TOKEN')\n assert token, \"Could not find any dokbot bot token\"\n\n intents = discord.Intents.default()\n intents.members = True\n\n prefix = '>' if os.getenv('APP_ENV') == 'development' else '!'\n bot = DokBot(command_prefix=prefix, intents=intents)\n bot.add_cog(DokBotCog(bot))\n bot.add_cog(EventCog(bot))\n\n @bot.event\n async def on_ready():\n logging.getLogger().info(f'{bot.user.name} has connected.')\n\n #\n # @discord_client.event\n # async def on_message(message: discord.Message) -> None:\n # if not discord_client.is_ready() or message.author == discord_client.user:\n # return\n # try:\n # await command_runner.run_command_for_message(message)\n # except Exception as ex:\n # await handle_exception(ex, author=message.author, content=message.content)\n #\n # @discord_client.event\n # async def on_raw_reaction_add(reaction_event: discord.RawReactionActionEvent) -> None:\n # if not discord_client.is_ready() or reaction_event.user_id == discord_client.user.id or reaction_event.emoji.name not in EMOJI_SIGNUP_STATUS.keys():\n # return\n # try:\n # await signup_character(client=discord_client, reaction_event=reaction_event)\n # except Exception as ex:\n # user = await discord_client.fetch_user(reaction_event.user_id)\n # await handle_exception(ex, author=user, content=\"Raid signup failed\")\n #\n # async def handle_exception(ex: Exception, author: discord.User, content: str) -> None:\n # 
Log.error(f\"{author}, {content}, {ex}\\n{traceback.format_exc()}\")\n # if isinstance(ex, BotException) and not isinstance(ex, InternalBotException):\n # await author.send(ex.message)\n # else:\n # global maintainer\n # if maintainer is None:\n # maintainer = await discord_client.fetch_user(MAINTAINER_ID)\n # await author.send(f\"There were internal difficulties. Sending a message to {maintainer.display_name}\")\n # await maintainer.send(f'{author.display_name}, {content}, {ex}')\n #\n try:\n bot.run(token)\n except InvalidStatusCode as e:\n error_message = f\"Could not start client {e}\\n{traceback.format_exc()}\"\n Log.error(error_message)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
filenames.sort()
<|reserved_special_token_0|>
for filename in filenames:
infile = open('data/SENTIMENT_test/' + filename, errors='ignore')
infiletext = infile.read()
infiletext = infiletext.replace('\n', ' ')
infiletext = infiletext.translate(remove_punctuation_map)
outfile.write(infiletext + '\n')
infile.close()
outfile.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
filenames = os.listdir('data/SENTIMENT_test')
filenames.sort()
outfile = open('sentiment_test.txt', 'w')
remove_punctuation_map = dict((ord(char), None) for char in string.punctuation)
for filename in filenames:
infile = open('data/SENTIMENT_test/' + filename, errors='ignore')
infiletext = infile.read()
infiletext = infiletext.replace('\n', ' ')
infiletext = infiletext.translate(remove_punctuation_map)
outfile.write(infiletext + '\n')
infile.close()
outfile.close()
<|reserved_special_token_1|>
import os
import string
filenames = os.listdir('data/SENTIMENT_test')
filenames.sort()
outfile = open('sentiment_test.txt', 'w')
remove_punctuation_map = dict((ord(char), None) for char in string.punctuation)
for filename in filenames:
infile = open('data/SENTIMENT_test/' + filename, errors='ignore')
infiletext = infile.read()
infiletext = infiletext.replace('\n', ' ')
infiletext = infiletext.translate(remove_punctuation_map)
outfile.write(infiletext + '\n')
infile.close()
outfile.close()
|
flexible
|
{
"blob_id": "6434e427c9015544985a38104cffeaa10866b9ea",
"index": 4585,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfilenames.sort()\n<mask token>\nfor filename in filenames:\n infile = open('data/SENTIMENT_test/' + filename, errors='ignore')\n infiletext = infile.read()\n infiletext = infiletext.replace('\\n', ' ')\n infiletext = infiletext.translate(remove_punctuation_map)\n outfile.write(infiletext + '\\n')\n infile.close()\noutfile.close()\n",
"step-3": "<mask token>\nfilenames = os.listdir('data/SENTIMENT_test')\nfilenames.sort()\noutfile = open('sentiment_test.txt', 'w')\nremove_punctuation_map = dict((ord(char), None) for char in string.punctuation)\nfor filename in filenames:\n infile = open('data/SENTIMENT_test/' + filename, errors='ignore')\n infiletext = infile.read()\n infiletext = infiletext.replace('\\n', ' ')\n infiletext = infiletext.translate(remove_punctuation_map)\n outfile.write(infiletext + '\\n')\n infile.close()\noutfile.close()\n",
"step-4": "import os\nimport string\nfilenames = os.listdir('data/SENTIMENT_test')\nfilenames.sort()\noutfile = open('sentiment_test.txt', 'w')\nremove_punctuation_map = dict((ord(char), None) for char in string.punctuation)\nfor filename in filenames:\n infile = open('data/SENTIMENT_test/' + filename, errors='ignore')\n infiletext = infile.read()\n infiletext = infiletext.replace('\\n', ' ')\n infiletext = infiletext.translate(remove_punctuation_map)\n outfile.write(infiletext + '\\n')\n infile.close()\noutfile.close()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django import forms
from django.forms import widgets
from tsuru_dashboard import settings
import requests
class ChangePasswordForm(forms.Form):
old = forms.CharField(widget=forms.PasswordInput())
new = forms.CharField(widget=forms.PasswordInput())
confirm = forms.CharField(widget=forms.PasswordInput())
class PasswordRecoveryForm(forms.Form):
email = forms.EmailField()
token = forms.CharField()
def send(self):
url = "{0}/users/{1}/password?token={2}".format(
settings.TSURU_HOST,
self.cleaned_data['email'],
self.cleaned_data['token']
)
requests.post(url)
class TokenRequestForm(forms.Form):
email = forms.EmailField()
def send(self):
url = "{0}/users/{1}/password".format(settings.TSURU_HOST,
self.cleaned_data['email'])
requests.post(url)
class LoginForm(forms.Form):
username = forms.EmailField(max_length=60, widget=forms.TextInput(attrs={'placeholder': 'Username'}))
password = forms.CharField(widget=widgets.PasswordInput(attrs={'placeholder': 'Password'}), min_length=6)
class AddUserToTeamForm(forms.Form):
def __init__(self, teams=None, *args, **kwargs):
super(AddUserToTeamForm, self).__init__(*args, **kwargs)
if teams:
choices = []
for team in teams:
choices.append((team, team))
self.fields["team"].choices = choices
email = forms.EmailField(max_length=60)
team = forms.ChoiceField(choices=[])
class SignupForm(forms.Form):
email = forms.EmailField(max_length=60)
password = forms.CharField(widget=widgets.PasswordInput, min_length=6)
same_password_again = forms.CharField(widget=widgets.PasswordInput,
min_length=6)
def clean(self):
cleaned_data = super(SignupForm, self).clean()
password = cleaned_data.get("password")
same_password_again = cleaned_data.get("same_password_again")
if not password == same_password_again:
msg = "You must type the same password twice!"
self._errors["same_password_again"] = self.error_class([msg])
raise forms.ValidationError(msg)
return cleaned_data
class KeyForm(forms.Form):
name = forms.CharField()
key = forms.CharField(widget=forms.Textarea)
|
normal
|
{
"blob_id": "27fc11ae68531c7dbafdcf134f0eef019210e2de",
"index": 8347,
"step-1": "<mask token>\n\n\nclass PasswordRecoveryForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TokenRequestForm(forms.Form):\n email = forms.EmailField()\n\n def send(self):\n url = '{0}/users/{1}/password'.format(settings.TSURU_HOST, self.\n cleaned_data['email'])\n requests.post(url)\n\n\nclass LoginForm(forms.Form):\n username = forms.EmailField(max_length=60, widget=forms.TextInput(attrs\n ={'placeholder': 'Username'}))\n password = forms.CharField(widget=widgets.PasswordInput(attrs={\n 'placeholder': 'Password'}), min_length=6)\n\n\nclass AddUserToTeamForm(forms.Form):\n\n def __init__(self, teams=None, *args, **kwargs):\n super(AddUserToTeamForm, self).__init__(*args, **kwargs)\n if teams:\n choices = []\n for team in teams:\n choices.append((team, team))\n self.fields['team'].choices = choices\n email = forms.EmailField(max_length=60)\n team = forms.ChoiceField(choices=[])\n\n\nclass SignupForm(forms.Form):\n email = forms.EmailField(max_length=60)\n password = forms.CharField(widget=widgets.PasswordInput, min_length=6)\n same_password_again = forms.CharField(widget=widgets.PasswordInput,\n min_length=6)\n\n def clean(self):\n cleaned_data = super(SignupForm, self).clean()\n password = cleaned_data.get('password')\n same_password_again = cleaned_data.get('same_password_again')\n if not password == same_password_again:\n msg = 'You must type the same password twice!'\n self._errors['same_password_again'] = self.error_class([msg])\n raise forms.ValidationError(msg)\n return cleaned_data\n\n\nclass KeyForm(forms.Form):\n name = forms.CharField()\n key = forms.CharField(widget=forms.Textarea)\n",
"step-2": "<mask token>\n\n\nclass PasswordRecoveryForm(forms.Form):\n <mask token>\n <mask token>\n\n def send(self):\n url = '{0}/users/{1}/password?token={2}'.format(settings.TSURU_HOST,\n self.cleaned_data['email'], self.cleaned_data['token'])\n requests.post(url)\n\n\nclass TokenRequestForm(forms.Form):\n email = forms.EmailField()\n\n def send(self):\n url = '{0}/users/{1}/password'.format(settings.TSURU_HOST, self.\n cleaned_data['email'])\n requests.post(url)\n\n\nclass LoginForm(forms.Form):\n username = forms.EmailField(max_length=60, widget=forms.TextInput(attrs\n ={'placeholder': 'Username'}))\n password = forms.CharField(widget=widgets.PasswordInput(attrs={\n 'placeholder': 'Password'}), min_length=6)\n\n\nclass AddUserToTeamForm(forms.Form):\n\n def __init__(self, teams=None, *args, **kwargs):\n super(AddUserToTeamForm, self).__init__(*args, **kwargs)\n if teams:\n choices = []\n for team in teams:\n choices.append((team, team))\n self.fields['team'].choices = choices\n email = forms.EmailField(max_length=60)\n team = forms.ChoiceField(choices=[])\n\n\nclass SignupForm(forms.Form):\n email = forms.EmailField(max_length=60)\n password = forms.CharField(widget=widgets.PasswordInput, min_length=6)\n same_password_again = forms.CharField(widget=widgets.PasswordInput,\n min_length=6)\n\n def clean(self):\n cleaned_data = super(SignupForm, self).clean()\n password = cleaned_data.get('password')\n same_password_again = cleaned_data.get('same_password_again')\n if not password == same_password_again:\n msg = 'You must type the same password twice!'\n self._errors['same_password_again'] = self.error_class([msg])\n raise forms.ValidationError(msg)\n return cleaned_data\n\n\nclass KeyForm(forms.Form):\n name = forms.CharField()\n key = forms.CharField(widget=forms.Textarea)\n",
"step-3": "<mask token>\n\n\nclass ChangePasswordForm(forms.Form):\n old = forms.CharField(widget=forms.PasswordInput())\n new = forms.CharField(widget=forms.PasswordInput())\n confirm = forms.CharField(widget=forms.PasswordInput())\n\n\nclass PasswordRecoveryForm(forms.Form):\n email = forms.EmailField()\n token = forms.CharField()\n\n def send(self):\n url = '{0}/users/{1}/password?token={2}'.format(settings.TSURU_HOST,\n self.cleaned_data['email'], self.cleaned_data['token'])\n requests.post(url)\n\n\nclass TokenRequestForm(forms.Form):\n email = forms.EmailField()\n\n def send(self):\n url = '{0}/users/{1}/password'.format(settings.TSURU_HOST, self.\n cleaned_data['email'])\n requests.post(url)\n\n\nclass LoginForm(forms.Form):\n username = forms.EmailField(max_length=60, widget=forms.TextInput(attrs\n ={'placeholder': 'Username'}))\n password = forms.CharField(widget=widgets.PasswordInput(attrs={\n 'placeholder': 'Password'}), min_length=6)\n\n\nclass AddUserToTeamForm(forms.Form):\n\n def __init__(self, teams=None, *args, **kwargs):\n super(AddUserToTeamForm, self).__init__(*args, **kwargs)\n if teams:\n choices = []\n for team in teams:\n choices.append((team, team))\n self.fields['team'].choices = choices\n email = forms.EmailField(max_length=60)\n team = forms.ChoiceField(choices=[])\n\n\nclass SignupForm(forms.Form):\n email = forms.EmailField(max_length=60)\n password = forms.CharField(widget=widgets.PasswordInput, min_length=6)\n same_password_again = forms.CharField(widget=widgets.PasswordInput,\n min_length=6)\n\n def clean(self):\n cleaned_data = super(SignupForm, self).clean()\n password = cleaned_data.get('password')\n same_password_again = cleaned_data.get('same_password_again')\n if not password == same_password_again:\n msg = 'You must type the same password twice!'\n self._errors['same_password_again'] = self.error_class([msg])\n raise forms.ValidationError(msg)\n return cleaned_data\n\n\nclass KeyForm(forms.Form):\n name = forms.CharField()\n 
key = forms.CharField(widget=forms.Textarea)\n",
"step-4": "from django import forms\nfrom django.forms import widgets\nfrom tsuru_dashboard import settings\nimport requests\n\n\nclass ChangePasswordForm(forms.Form):\n old = forms.CharField(widget=forms.PasswordInput())\n new = forms.CharField(widget=forms.PasswordInput())\n confirm = forms.CharField(widget=forms.PasswordInput())\n\n\nclass PasswordRecoveryForm(forms.Form):\n email = forms.EmailField()\n token = forms.CharField()\n\n def send(self):\n url = '{0}/users/{1}/password?token={2}'.format(settings.TSURU_HOST,\n self.cleaned_data['email'], self.cleaned_data['token'])\n requests.post(url)\n\n\nclass TokenRequestForm(forms.Form):\n email = forms.EmailField()\n\n def send(self):\n url = '{0}/users/{1}/password'.format(settings.TSURU_HOST, self.\n cleaned_data['email'])\n requests.post(url)\n\n\nclass LoginForm(forms.Form):\n username = forms.EmailField(max_length=60, widget=forms.TextInput(attrs\n ={'placeholder': 'Username'}))\n password = forms.CharField(widget=widgets.PasswordInput(attrs={\n 'placeholder': 'Password'}), min_length=6)\n\n\nclass AddUserToTeamForm(forms.Form):\n\n def __init__(self, teams=None, *args, **kwargs):\n super(AddUserToTeamForm, self).__init__(*args, **kwargs)\n if teams:\n choices = []\n for team in teams:\n choices.append((team, team))\n self.fields['team'].choices = choices\n email = forms.EmailField(max_length=60)\n team = forms.ChoiceField(choices=[])\n\n\nclass SignupForm(forms.Form):\n email = forms.EmailField(max_length=60)\n password = forms.CharField(widget=widgets.PasswordInput, min_length=6)\n same_password_again = forms.CharField(widget=widgets.PasswordInput,\n min_length=6)\n\n def clean(self):\n cleaned_data = super(SignupForm, self).clean()\n password = cleaned_data.get('password')\n same_password_again = cleaned_data.get('same_password_again')\n if not password == same_password_again:\n msg = 'You must type the same password twice!'\n self._errors['same_password_again'] = self.error_class([msg])\n raise 
forms.ValidationError(msg)\n return cleaned_data\n\n\nclass KeyForm(forms.Form):\n name = forms.CharField()\n key = forms.CharField(widget=forms.Textarea)\n",
"step-5": "from django import forms\nfrom django.forms import widgets\nfrom tsuru_dashboard import settings\n\nimport requests\n\n\nclass ChangePasswordForm(forms.Form):\n old = forms.CharField(widget=forms.PasswordInput())\n new = forms.CharField(widget=forms.PasswordInput())\n confirm = forms.CharField(widget=forms.PasswordInput())\n\n\nclass PasswordRecoveryForm(forms.Form):\n email = forms.EmailField()\n token = forms.CharField()\n\n def send(self):\n url = \"{0}/users/{1}/password?token={2}\".format(\n settings.TSURU_HOST,\n self.cleaned_data['email'],\n self.cleaned_data['token']\n )\n requests.post(url)\n\n\nclass TokenRequestForm(forms.Form):\n email = forms.EmailField()\n\n def send(self):\n url = \"{0}/users/{1}/password\".format(settings.TSURU_HOST,\n self.cleaned_data['email'])\n requests.post(url)\n\n\nclass LoginForm(forms.Form):\n username = forms.EmailField(max_length=60, widget=forms.TextInput(attrs={'placeholder': 'Username'}))\n password = forms.CharField(widget=widgets.PasswordInput(attrs={'placeholder': 'Password'}), min_length=6)\n\n\nclass AddUserToTeamForm(forms.Form):\n\n def __init__(self, teams=None, *args, **kwargs):\n super(AddUserToTeamForm, self).__init__(*args, **kwargs)\n if teams:\n choices = []\n for team in teams:\n choices.append((team, team))\n self.fields[\"team\"].choices = choices\n\n email = forms.EmailField(max_length=60)\n team = forms.ChoiceField(choices=[])\n\n\nclass SignupForm(forms.Form):\n email = forms.EmailField(max_length=60)\n password = forms.CharField(widget=widgets.PasswordInput, min_length=6)\n same_password_again = forms.CharField(widget=widgets.PasswordInput,\n min_length=6)\n\n def clean(self):\n cleaned_data = super(SignupForm, self).clean()\n password = cleaned_data.get(\"password\")\n same_password_again = cleaned_data.get(\"same_password_again\")\n\n if not password == same_password_again:\n msg = \"You must type the same password twice!\"\n self._errors[\"same_password_again\"] = 
self.error_class([msg])\n raise forms.ValidationError(msg)\n\n return cleaned_data\n\n\nclass KeyForm(forms.Form):\n name = forms.CharField()\n key = forms.CharField(widget=forms.Textarea)\n",
"step-ids": [
14,
15,
18,
19,
20
]
}
|
[
14,
15,
18,
19,
20
] |
# Packaging script for the ``qn`` helper library.
from setuptools import setup, find_packages

# Package metadata collected in one mapping so the setup() call stays short.
_METADATA = dict(
    name='qn',
    version='0.2.2',
    description='Handy functions I use everyday.',
    url='https://github.com/frlender/qn',
    author='Qiaonan Duan',
    author_email='geonann@gmail.com',
    license='MIT',
)

setup(
    packages=find_packages(),
    # Runtime dependencies are deliberately not declared: the original
    # install_requires list (matplotlib, seaborn, numpy, scipy, pandas,
    # PyYAML, matplotlib-venn, scikit-learn) was commented out upstream.
    zip_safe=False,
    **_METADATA,
)
|
normal
|
{
"blob_id": "3b307ae7f8b8b25c93eb2dc54b2603b1291b6232",
"index": 1789,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='qn', version='0.2.2', description=\n 'Handy functions I use everyday.', url='https://github.com/frlender/qn',\n author='Qiaonan Duan', author_email='geonann@gmail.com', license='MIT',\n packages=find_packages(), zip_safe=False)\n",
"step-3": "from setuptools import setup, find_packages\nsetup(name='qn', version='0.2.2', description=\n 'Handy functions I use everyday.', url='https://github.com/frlender/qn',\n author='Qiaonan Duan', author_email='geonann@gmail.com', license='MIT',\n packages=find_packages(), zip_safe=False)\n",
"step-4": "from setuptools import setup, find_packages\n\nsetup(name='qn',\n version='0.2.2',\n description='Handy functions I use everyday.',\n url='https://github.com/frlender/qn',\n author='Qiaonan Duan',\n author_email='geonann@gmail.com',\n license='MIT',\n packages=find_packages(),\n # install_requires=[\n # 'matplotlib',\n # 'seaborn',\n # 'numpy',\n # 'scipy',\n # 'pandas',\n # 'PyYAML',\n # 'matplotlib-venn',\n # 'scikit-learn'\n # ],\n zip_safe=False)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class TestCRMcreateCustomer(TestCRM):
<|reserved_special_token_0|>
def test_weiChat(self):
self.login()
self.createCustomer()
self.logout()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestCRMcreateCustomer(TestCRM):
def createCustomer(self):
self.driver.click('text= 客户 ')
self.driver.click('text=sYVInwAAAABJRU5ErkJggg==')
self.driver.send_keys('xpath=//*[@text="请输入"][1]', 'crm000001')
self.driver.send_keys('xpath=//*[@text="请输入"][1]', 'c000001')
self.driver.click_index('class=android.view.View', 59)
self.driver.click('text=电话营销')
self.driver.click('text=保存')
self.driver.click_index('class=android.view.View', 10)
def test_weiChat(self):
self.login()
self.createCustomer()
self.logout()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestCRMcreateCustomer(TestCRM):
def createCustomer(self):
self.driver.click('text= 客户 ')
self.driver.click('text=sYVInwAAAABJRU5ErkJggg==')
self.driver.send_keys('xpath=//*[@text="请输入"][1]', 'crm000001')
self.driver.send_keys('xpath=//*[@text="请输入"][1]', 'c000001')
self.driver.click_index('class=android.view.View', 59)
self.driver.click('text=电话营销')
self.driver.click('text=保存')
self.driver.click_index('class=android.view.View', 10)
def test_weiChat(self):
self.login()
self.createCustomer()
self.logout()
if __name__ == '__main__':
report_path = os.path.dirname(__file__
) + '/report/' + 'TestCRM_report.html'
suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)
runer = HTMLTestRunner(title='悟空CRM测试报告', description='登录', stream=open
(report_path, 'wb'), verbosity=2, retry=0, save_last_try=True)
runer.run(suite)
<|reserved_special_token_1|>
import os
import unittest
from HTMLTestRunner_cn import HTMLTestRunner
from time import sleep
from framework.SunFlower import SunFlower
from testcase.TestCRM import TestCRM
class TestCRMcreateCustomer(TestCRM):
    """UI test: create one customer record in the WuKong CRM Android app.

    Inherits ``login()``/``logout()`` and ``self.driver`` from ``TestCRM``.
    """

    def createCustomer(self):
        # Open the customer tab, then tap the add-customer button
        # (the button is located by its base64 icon resource text).
        self.driver.click('text= 客户 ')
        self.driver.click('text=sYVInwAAAABJRU5ErkJggg==')
        # Fill in customer name and customer number.
        # NOTE(review): both send_keys use the identical "[1]" xpath index,
        # so the second call may re-target the first field — verify on device.
        self.driver.send_keys('xpath=//*[@text="请输入"][1]', 'crm000001')
        self.driver.send_keys('xpath=//*[@text="请输入"][1]', 'c000001')
        # Pick the lead source: open the picker (view index 59), choose
        # the "telemarketing" entry.
        self.driver.click_index('class=android.view.View', 59)
        self.driver.click('text=电话营销')
        # Save the record, then tap back (view index 10).
        self.driver.click('text=保存')
        self.driver.click_index('class=android.view.View', 10)

    def test_weiChat(self):
        # End-to-end flow: log in, create the customer, log out.
        self.login()
        self.createCustomer()
        self.logout()
if __name__ == '__main__':
    # HTML report is written next to this file under ./report/.
    report_path = os.path.dirname(__file__
        ) + '/report/' + 'TestCRM_report.html'
    # NOTE(review): the suite is built from the TestCRM base class, so
    # test_weiChat defined on TestCRMcreateCustomer above is never loaded —
    # this probably should be loadTestsFromTestCase(TestCRMcreateCustomer).
    suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)
    runer = HTMLTestRunner(title='悟空CRM测试报告', description='登录', stream=open
        (report_path, 'wb'), verbosity=2, retry=0, save_last_try=True)
    runer.run(suite)
<|reserved_special_token_1|>
# -*- encoding:utf-8 -*-
import os
import unittest
from HTMLTestRunner_cn import HTMLTestRunner
from time import sleep
from framework.SunFlower import SunFlower
from testcase.TestCRM import TestCRM
class TestCRMcreateCustomer(TestCRM):
    """UI test: create one customer record in the WuKong CRM Android app."""

    # Create a customer record.
    def createCustomer(self):

        # Tap the customer tab icon.
        self.driver.click("text= 客户 ")
        # Tap the add-customer button (matched by its icon resource text).
        self.driver.click("text=sYVInwAAAABJRU5ErkJggg==")
        # Enter the customer name.
        self.driver.send_keys("xpath=//*[@text=\"请输入\"][1]","crm000001")

        # Enter the customer number.
        # NOTE(review): same "[1]" xpath as above — may hit the same field.
        self.driver.send_keys("xpath=//*[@text=\"请输入\"][1]","c000001")
        # Choose the customer lead source ("telemarketing").
        self.driver.click_index("class=android.view.View",59)
        self.driver.click("text=电话营销")
        # Save.
        self.driver.click("text=保存")
        # Tap back.
        self.driver.click_index("class=android.view.View",10)
        # sleep(5)
        # # # Swipe the screen upwards.
        # # self.driver.swipe_up(n=3)

    def test_weiChat(self):
        # Full flow: log in, create the customer, log out.
        self.login()
        self.createCustomer()
        self.logout()
if __name__ == "__main__":
    # HTML report is written next to this file under ./report/.
    report_path = os.path.dirname(__file__) + "/report/" + "TestCRM_report.html"
    # NOTE(review): loads tests from the TestCRM base class, so test_weiChat
    # on TestCRMcreateCustomer is never run; likely should load the subclass.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)
    runer = HTMLTestRunner(title="悟空CRM测试报告", description="登录", stream=open(report_path, "wb"),
                           verbosity=2, retry=0, save_last_try=True)
    runer.run(suite)
|
flexible
|
{
"blob_id": "74bc530d53cd86c52c44ba8e98d4d8f502032340",
"index": 2423,
"step-1": "<mask token>\n\n\nclass TestCRMcreateCustomer(TestCRM):\n <mask token>\n\n def test_weiChat(self):\n self.login()\n self.createCustomer()\n self.logout()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCRMcreateCustomer(TestCRM):\n\n def createCustomer(self):\n self.driver.click('text= 客户 ')\n self.driver.click('text=sYVInwAAAABJRU5ErkJggg==')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'crm000001')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'c000001')\n self.driver.click_index('class=android.view.View', 59)\n self.driver.click('text=电话营销')\n self.driver.click('text=保存')\n self.driver.click_index('class=android.view.View', 10)\n\n def test_weiChat(self):\n self.login()\n self.createCustomer()\n self.logout()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestCRMcreateCustomer(TestCRM):\n\n def createCustomer(self):\n self.driver.click('text= 客户 ')\n self.driver.click('text=sYVInwAAAABJRU5ErkJggg==')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'crm000001')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'c000001')\n self.driver.click_index('class=android.view.View', 59)\n self.driver.click('text=电话营销')\n self.driver.click('text=保存')\n self.driver.click_index('class=android.view.View', 10)\n\n def test_weiChat(self):\n self.login()\n self.createCustomer()\n self.logout()\n\n\nif __name__ == '__main__':\n report_path = os.path.dirname(__file__\n ) + '/report/' + 'TestCRM_report.html'\n suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)\n runer = HTMLTestRunner(title='悟空CRM测试报告', description='登录', stream=open\n (report_path, 'wb'), verbosity=2, retry=0, save_last_try=True)\n runer.run(suite)\n",
"step-4": "import os\nimport unittest\nfrom HTMLTestRunner_cn import HTMLTestRunner\nfrom time import sleep\nfrom framework.SunFlower import SunFlower\nfrom testcase.TestCRM import TestCRM\n\n\nclass TestCRMcreateCustomer(TestCRM):\n\n def createCustomer(self):\n self.driver.click('text= 客户 ')\n self.driver.click('text=sYVInwAAAABJRU5ErkJggg==')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'crm000001')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'c000001')\n self.driver.click_index('class=android.view.View', 59)\n self.driver.click('text=电话营销')\n self.driver.click('text=保存')\n self.driver.click_index('class=android.view.View', 10)\n\n def test_weiChat(self):\n self.login()\n self.createCustomer()\n self.logout()\n\n\nif __name__ == '__main__':\n report_path = os.path.dirname(__file__\n ) + '/report/' + 'TestCRM_report.html'\n suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)\n runer = HTMLTestRunner(title='悟空CRM测试报告', description='登录', stream=open\n (report_path, 'wb'), verbosity=2, retry=0, save_last_try=True)\n runer.run(suite)\n",
"step-5": "# -*- encoding:utf-8 -*-\nimport os\nimport unittest\nfrom HTMLTestRunner_cn import HTMLTestRunner\nfrom time import sleep\n\nfrom framework.SunFlower import SunFlower\nfrom testcase.TestCRM import TestCRM\n\n\nclass TestCRMcreateCustomer(TestCRM):\n\n # 创建客户\n def createCustomer(self):\n\n # 点击客户图标\n self.driver.click(\"text= 客户 \")\n # 点击添加客户按钮\n self.driver.click(\"text=sYVInwAAAABJRU5ErkJggg==\")\n #输入客户名称\n self.driver.send_keys(\"xpath=//*[@text=\\\"请输入\\\"][1]\",\"crm000001\")\n\n #输入客户编号\n self.driver.send_keys(\"xpath=//*[@text=\\\"请输入\\\"][1]\",\"c000001\")\n #选择客户信息来源\n self.driver.click_index(\"class=android.view.View\",59)\n self.driver.click(\"text=电话营销\")\n #保存\n self.driver.click(\"text=保存\")\n #点击返回\n self.driver.click_index(\"class=android.view.View\",10)\n # sleep(5)\n # # # 向上滑动屏幕\n # # self.driver.swipe_up(n=3)\n\n def test_weiChat(self):\n self.login()\n self.createCustomer()\n self.logout()\n\n\nif __name__ == \"__main__\":\n report_path = os.path.dirname(__file__) + \"/report/\" + \"TestCRM_report.html\"\n suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)\n runer = HTMLTestRunner(title=\"悟空CRM测试报告\", description=\"登录\", stream=open(report_path, \"wb\"),\n verbosity=2, retry=0, save_last_try=True)\n runer.run(suite)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import datetime
from ..core.indicator import Indicator, IndicatorState
from ..core.toolwindow import ToolWindow
class HaakePhoenix(ToolWindow):
    """Tool window showing status/error/misc indicators for a Haake Phoenix
    circulator device and exposing setpoint/limit/RTC controls.

    The window is driven by the device named 'haakephoenix'; indicator
    widgets are keyed by device variable name in ``self.indicators``.
    """

    # Device(s) this window requires; presumably consumed by ToolWindow.
    required_devices = ['haakephoenix']

    def __init__(self, *args, **wargs):
        # Map of device variable name -> Indicator widget, filled in init_gui().
        # Must exist before super().__init__ in case the base class triggers
        # GUI construction.
        self.indicators = {}
        super().__init__(*args, **wargs)

    def init_gui(self, *args, **kwargs):
        """Build the three indicator grids (status, errors, other) and show
        the current device values."""
        # Status grid: run-state, temperatures, pump and control flags.
        statusgrid = self.builder.get_object('statusgrid')
        for row, column, vn, label in [(0, 0, '_status', 'Status'),
                                       (0, 1, 'setpoint', 'Target temperature'),
                                       (0, 2, 'temperature', 'Temperature'),
                                       (0, 3, 'pump_power', 'Pump speed'),
                                       (0, 4, 'control_on', 'Temperature control'),
                                       (1, 0, 'lowlimit', 'Low limit'),
                                       (1, 1, 'highlimit', 'High limit'),
                                       (1, 2, 'cooling_on', 'Cooling'),
                                       (1, 3, 'control_external', 'Control'),
                                       (1, 4, 'diffcontrol_on', 'Differential control')]:
            self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN)
            statusgrid.attach(self.indicators[vn], column, row, 1, 1)
        # Error grid: one indicator per device fault flag.
        errorgrid = self.builder.get_object('errorgrid')
        for row, column, vn, label in [(0, 0, 'external_pt100_error', 'External Pt100'),  #
                                       (0, 1, 'internal_pt100_error', 'Internal Pt100'),  #
                                       (0, 2, 'liquid_level_low_error', 'Liquid level'),  #
                                       (0, 3, 'liquid_level_alarm_error', 'Liquid level alarm'),  #
                                       (0, 4, 'cooling_error', 'Cooling system'),  #
                                       (1, 0, 'pump_overload_error', 'Pump'),  #
                                       (1, 1, 'external_alarm_error', 'External alarm'),  #
                                       (1, 2, 'overtemperature_error', 'Overtemperature'),  #
                                       (1, 3, 'main_relay_missing_error', 'Main relay'),  #
                                       (1, 4, 'faultstatus', 'Status flags')]:  #
            self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN)
            errorgrid.attach(self.indicators[vn], column, row, 1, 1)
        # Other grid: firmware/RTC/fuzzy-logic/watchdog information.
        othergrid = self.builder.get_object('othergrid')
        for row, column, vn, label in [(0, 0, 'firmwareversion', 'Firmware version'),  #
                                       (0, 1, 'date', 'Date'),  #
                                       (0, 2, 'time', 'Time'),  #
                                       (0, 3, 'autostart', 'Autostart'),  #
                                       (0, 4, 'beep', 'Beep'),  #
                                       (1, 0, 'fuzzyid', 'Fuzzy identification'),  #
                                       (1, 1, 'fuzzycontrol', 'Fuzzy control'),  #
                                       (1, 2, 'fuzzystatus', 'Fuzzy status'),  #
                                       (1, 3, 'watchdog_on', 'Watchdog'),  #
                                       (1, 4, 'watchdog_setpoint', 'Watchdog setpoint')]:  #
            self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN)
            othergrid.attach(self.indicators[vn], column, row, 1, 1)
        # Populate every indicator with current device values.
        self.update_indicators()

    def on_mainwidget_map(self, window):
        """Refresh indicators whenever the window is (re)shown."""
        if super().on_mainwidget_map(window):
            # Base class signalled the event is fully handled; do nothing more.
            return True
        self.update_indicators()

    def update_indicators(self):
        """Re-read every tracked variable from the device and refresh both
        the indicator widgets and the three spin-button adjustments."""
        dev = self.instrument.get_device('haakephoenix')
        for vn in self.indicators:
            # Reuse the change handler so formatting logic lives in one place.
            self.on_device_variable_change(dev, vn, dev.get_variable(vn))
        self.builder.get_object('setpoint_adjustment').set_value(
            dev.get_variable('setpoint'))
        self.builder.get_object('lowlimit_adjustment').set_value(
            dev.get_variable('lowlimit'))
        self.builder.get_object('highlimit_adjustment').set_value(
            dev.get_variable('highlimit'))

    def on_device_variable_change(self, device, variablename, newvalue):
        """Format *newvalue* for its indicator widget, grouped by the kind of
        variable; also mirror fuzzyid/pump_power into their switches.

        Returns False so a connected signal handler does not block others.
        """
        # Free-form / string-like values: neutral colouring.
        if variablename in ['_status', 'firmwareversion', 'fuzzycontrol', 'date', 'time', 'faultstatus']:
            self.indicators[variablename].set_value(str(newvalue), IndicatorState.NEUTRAL)
        # Temperatures in degrees Celsius.
        elif variablename in ['setpoint', 'temperature', 'lowlimit', 'highlimit']:
            self.indicators[variablename].set_value('%.2f°C' % newvalue, IndicatorState.NEUTRAL)
        # Boolean feature flags: ON shown green (OK), OFF red (ERROR).
        elif variablename in ['control_on', 'cooling_on', 'diffcontrol_on', 'watchdog_on', 'beep', 'fuzzyid',
                              'fuzzystatus',
                              'autostart']:
            self.indicators[variablename].set_value(['OFF', 'ON'][int(bool(newvalue))],
                                                    [IndicatorState.ERROR, IndicatorState.OK][int(bool(newvalue))])
        # Pump speed as a percentage; zero speed is flagged as an error state.
        elif variablename in ['pump_power']:
            self.indicators[variablename].set_value('%.2f %%' % newvalue,
                                                    [IndicatorState.ERROR, IndicatorState.OK][newvalue > 0])
        # Hard fault flags: truthy means ERROR.
        elif variablename in ['external_pt100_error', 'internal_pt100_error', 'liquid_level_low_error', 'cooling_error',
                              'main_relay_missing_error']:
            self.indicators[variablename].set_value(['OK', 'ERROR'][int(bool(newvalue))],
                                                    [IndicatorState.OK, IndicatorState.ERROR][int(bool(newvalue))])
        # Alarm flags: truthy means ALARM.
        elif variablename in ['liquid_level_alarm_error', 'external_alarm_error', 'overtemperature_error']:
            self.indicators[variablename].set_value(['OK', 'ALARM'][int(bool(newvalue))],
                                                    [IndicatorState.OK, IndicatorState.ERROR][int(bool(newvalue))])
        # Pump overload flag.
        elif variablename in ['pump_overload_error']:
            self.indicators[variablename].set_value(['OK', 'OVERLOAD'][int(bool(newvalue))],
                                                    [IndicatorState.OK, IndicatorState.ERROR][int(bool(newvalue))])
        # Watchdog timeout in seconds.
        elif variablename in ['watchdog_setpoint']:
            self.indicators[variablename].set_value('%.2f sec' % newvalue, IndicatorState.UNKNOWN)
        # Which sensor drives the temperature control loop.
        elif variablename in ['control_external']:
            self.indicators[variablename].set_value(['Internal', 'External'][int(bool(newvalue))],
                                                    IndicatorState.NEUTRAL)
        # Keep the toggle switches in sync with the device state.
        if variablename == 'fuzzyid':
            self.builder.get_object('fuzzyid_switch').set_state(bool(newvalue))
        elif variablename == 'pump_power':
            self.builder.get_object('circulator_switch').set_state(newvalue > 0)
        return False

    def on_circulator_switch_state_set(self, switch, state):
        """Start/stop the circulator when the switch is toggled.

        Returns True: the switch state is updated later from the device
        (see on_device_variable_change), not by the default handler.
        """
        dev = self.instrument.get_device('haakephoenix')
        if state:
            dev.execute_command('start')
        else:
            dev.execute_command('stop')
        return True

    def on_fuzzyid_switch_state_set(self, switch, state):
        """Forward the fuzzy-identification toggle to the device."""
        self.instrument.get_device('haakephoenix').set_variable('fuzzyid', state)
        return True

    def on_set_setpoint(self, button):
        """Send the setpoint spin-button value to the device."""
        spinbutton = self.builder.get_object('setpoint_spin')
        self.instrument.get_device('haakephoenix').set_variable('setpoint', spinbutton.get_value())

    def on_set_lowlimit(self, button):
        """Send the low-limit spin-button value to the device."""
        spinbutton = self.builder.get_object('lowlimit_spin')
        self.instrument.get_device('haakephoenix').set_variable('lowlimit', spinbutton.get_value())

    def on_set_highlimit(self, button):
        """Send the high-limit spin-button value to the device."""
        spinbutton = self.builder.get_object('highlimit_spin')
        self.instrument.get_device('haakephoenix').set_variable('highlimit', spinbutton.get_value())

    def on_update_rtc(self, button):
        """Set the device real-time clock from the host's local time.

        NOTE(review): uses naive local time (datetime.now() without tz);
        confirm the device expects local rather than UTC time.
        """
        now = datetime.datetime.now()
        self.instrument.get_device('haakephoenix').set_variable('date', now.date())
        self.instrument.get_device('haakephoenix').set_variable('time', now.time())
|
normal
|
{
"blob_id": "25aa0766505b22588107d44e15c3596e9383d4e9",
"index": 486,
"step-1": "<mask token>\n\n\nclass HaakePhoenix(ToolWindow):\n <mask token>\n\n def __init__(self, *args, **wargs):\n self.indicators = {}\n super().__init__(*args, **wargs)\n\n def init_gui(self, *args, **kwargs):\n statusgrid = self.builder.get_object('statusgrid')\n for row, column, vn, label in [(0, 0, '_status', 'Status'), (0, 1,\n 'setpoint', 'Target temperature'), (0, 2, 'temperature',\n 'Temperature'), (0, 3, 'pump_power', 'Pump speed'), (0, 4,\n 'control_on', 'Temperature control'), (1, 0, 'lowlimit',\n 'Low limit'), (1, 1, 'highlimit', 'High limit'), (1, 2,\n 'cooling_on', 'Cooling'), (1, 3, 'control_external', 'Control'),\n (1, 4, 'diffcontrol_on', 'Differential control')]:\n self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN\n )\n statusgrid.attach(self.indicators[vn], column, row, 1, 1)\n errorgrid = self.builder.get_object('errorgrid')\n for row, column, vn, label in [(0, 0, 'external_pt100_error',\n 'External Pt100'), (0, 1, 'internal_pt100_error',\n 'Internal Pt100'), (0, 2, 'liquid_level_low_error',\n 'Liquid level'), (0, 3, 'liquid_level_alarm_error',\n 'Liquid level alarm'), (0, 4, 'cooling_error', 'Cooling system'\n ), (1, 0, 'pump_overload_error', 'Pump'), (1, 1,\n 'external_alarm_error', 'External alarm'), (1, 2,\n 'overtemperature_error', 'Overtemperature'), (1, 3,\n 'main_relay_missing_error', 'Main relay'), (1, 4, 'faultstatus',\n 'Status flags')]:\n self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN\n )\n errorgrid.attach(self.indicators[vn], column, row, 1, 1)\n othergrid = self.builder.get_object('othergrid')\n for row, column, vn, label in [(0, 0, 'firmwareversion',\n 'Firmware version'), (0, 1, 'date', 'Date'), (0, 2, 'time',\n 'Time'), (0, 3, 'autostart', 'Autostart'), (0, 4, 'beep',\n 'Beep'), (1, 0, 'fuzzyid', 'Fuzzy identification'), (1, 1,\n 'fuzzycontrol', 'Fuzzy control'), (1, 2, 'fuzzystatus',\n 'Fuzzy status'), (1, 3, 'watchdog_on', 'Watchdog'), (1, 4,\n 'watchdog_setpoint', 'Watchdog 
setpoint')]:\n self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN\n )\n othergrid.attach(self.indicators[vn], column, row, 1, 1)\n self.update_indicators()\n\n def on_mainwidget_map(self, window):\n if super().on_mainwidget_map(window):\n return True\n self.update_indicators()\n <mask token>\n <mask token>\n <mask token>\n\n def on_fuzzyid_switch_state_set(self, switch, state):\n self.instrument.get_device('haakephoenix').set_variable('fuzzyid',\n state)\n return True\n\n def on_set_setpoint(self, button):\n spinbutton = self.builder.get_object('setpoint_spin')\n self.instrument.get_device('haakephoenix').set_variable('setpoint',\n spinbutton.get_value())\n\n def on_set_lowlimit(self, button):\n spinbutton = self.builder.get_object('lowlimit_spin')\n self.instrument.get_device('haakephoenix').set_variable('lowlimit',\n spinbutton.get_value())\n <mask token>\n\n def on_update_rtc(self, button):\n now = datetime.datetime.now()\n self.instrument.get_device('haakephoenix').set_variable('date', now\n .date())\n self.instrument.get_device('haakephoenix').set_variable('time', now\n .time())\n",
"step-2": "<mask token>\n\n\nclass HaakePhoenix(ToolWindow):\n <mask token>\n\n def __init__(self, *args, **wargs):\n self.indicators = {}\n super().__init__(*args, **wargs)\n\n def init_gui(self, *args, **kwargs):\n statusgrid = self.builder.get_object('statusgrid')\n for row, column, vn, label in [(0, 0, '_status', 'Status'), (0, 1,\n 'setpoint', 'Target temperature'), (0, 2, 'temperature',\n 'Temperature'), (0, 3, 'pump_power', 'Pump speed'), (0, 4,\n 'control_on', 'Temperature control'), (1, 0, 'lowlimit',\n 'Low limit'), (1, 1, 'highlimit', 'High limit'), (1, 2,\n 'cooling_on', 'Cooling'), (1, 3, 'control_external', 'Control'),\n (1, 4, 'diffcontrol_on', 'Differential control')]:\n self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN\n )\n statusgrid.attach(self.indicators[vn], column, row, 1, 1)\n errorgrid = self.builder.get_object('errorgrid')\n for row, column, vn, label in [(0, 0, 'external_pt100_error',\n 'External Pt100'), (0, 1, 'internal_pt100_error',\n 'Internal Pt100'), (0, 2, 'liquid_level_low_error',\n 'Liquid level'), (0, 3, 'liquid_level_alarm_error',\n 'Liquid level alarm'), (0, 4, 'cooling_error', 'Cooling system'\n ), (1, 0, 'pump_overload_error', 'Pump'), (1, 1,\n 'external_alarm_error', 'External alarm'), (1, 2,\n 'overtemperature_error', 'Overtemperature'), (1, 3,\n 'main_relay_missing_error', 'Main relay'), (1, 4, 'faultstatus',\n 'Status flags')]:\n self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN\n )\n errorgrid.attach(self.indicators[vn], column, row, 1, 1)\n othergrid = self.builder.get_object('othergrid')\n for row, column, vn, label in [(0, 0, 'firmwareversion',\n 'Firmware version'), (0, 1, 'date', 'Date'), (0, 2, 'time',\n 'Time'), (0, 3, 'autostart', 'Autostart'), (0, 4, 'beep',\n 'Beep'), (1, 0, 'fuzzyid', 'Fuzzy identification'), (1, 1,\n 'fuzzycontrol', 'Fuzzy control'), (1, 2, 'fuzzystatus',\n 'Fuzzy status'), (1, 3, 'watchdog_on', 'Watchdog'), (1, 4,\n 'watchdog_setpoint', 'Watchdog 
setpoint')]:\n self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN\n )\n othergrid.attach(self.indicators[vn], column, row, 1, 1)\n self.update_indicators()\n\n def on_mainwidget_map(self, window):\n if super().on_mainwidget_map(window):\n return True\n self.update_indicators()\n <mask token>\n <mask token>\n\n def on_circulator_switch_state_set(self, switch, state):\n dev = self.instrument.get_device('haakephoenix')\n if state:\n dev.execute_command('start')\n else:\n dev.execute_command('stop')\n return True\n\n def on_fuzzyid_switch_state_set(self, switch, state):\n self.instrument.get_device('haakephoenix').set_variable('fuzzyid',\n state)\n return True\n\n def on_set_setpoint(self, button):\n spinbutton = self.builder.get_object('setpoint_spin')\n self.instrument.get_device('haakephoenix').set_variable('setpoint',\n spinbutton.get_value())\n\n def on_set_lowlimit(self, button):\n spinbutton = self.builder.get_object('lowlimit_spin')\n self.instrument.get_device('haakephoenix').set_variable('lowlimit',\n spinbutton.get_value())\n\n def on_set_highlimit(self, button):\n spinbutton = self.builder.get_object('highlimit_spin')\n self.instrument.get_device('haakephoenix').set_variable('highlimit',\n spinbutton.get_value())\n\n def on_update_rtc(self, button):\n now = datetime.datetime.now()\n self.instrument.get_device('haakephoenix').set_variable('date', now\n .date())\n self.instrument.get_device('haakephoenix').set_variable('time', now\n .time())\n",
"step-3": "<mask token>\n\n\nclass HaakePhoenix(ToolWindow):\n required_devices = ['haakephoenix']\n\n def __init__(self, *args, **wargs):\n self.indicators = {}\n super().__init__(*args, **wargs)\n\n def init_gui(self, *args, **kwargs):\n statusgrid = self.builder.get_object('statusgrid')\n for row, column, vn, label in [(0, 0, '_status', 'Status'), (0, 1,\n 'setpoint', 'Target temperature'), (0, 2, 'temperature',\n 'Temperature'), (0, 3, 'pump_power', 'Pump speed'), (0, 4,\n 'control_on', 'Temperature control'), (1, 0, 'lowlimit',\n 'Low limit'), (1, 1, 'highlimit', 'High limit'), (1, 2,\n 'cooling_on', 'Cooling'), (1, 3, 'control_external', 'Control'),\n (1, 4, 'diffcontrol_on', 'Differential control')]:\n self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN\n )\n statusgrid.attach(self.indicators[vn], column, row, 1, 1)\n errorgrid = self.builder.get_object('errorgrid')\n for row, column, vn, label in [(0, 0, 'external_pt100_error',\n 'External Pt100'), (0, 1, 'internal_pt100_error',\n 'Internal Pt100'), (0, 2, 'liquid_level_low_error',\n 'Liquid level'), (0, 3, 'liquid_level_alarm_error',\n 'Liquid level alarm'), (0, 4, 'cooling_error', 'Cooling system'\n ), (1, 0, 'pump_overload_error', 'Pump'), (1, 1,\n 'external_alarm_error', 'External alarm'), (1, 2,\n 'overtemperature_error', 'Overtemperature'), (1, 3,\n 'main_relay_missing_error', 'Main relay'), (1, 4, 'faultstatus',\n 'Status flags')]:\n self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN\n )\n errorgrid.attach(self.indicators[vn], column, row, 1, 1)\n othergrid = self.builder.get_object('othergrid')\n for row, column, vn, label in [(0, 0, 'firmwareversion',\n 'Firmware version'), (0, 1, 'date', 'Date'), (0, 2, 'time',\n 'Time'), (0, 3, 'autostart', 'Autostart'), (0, 4, 'beep',\n 'Beep'), (1, 0, 'fuzzyid', 'Fuzzy identification'), (1, 1,\n 'fuzzycontrol', 'Fuzzy control'), (1, 2, 'fuzzystatus',\n 'Fuzzy status'), (1, 3, 'watchdog_on', 'Watchdog'), (1, 4,\n 
'watchdog_setpoint', 'Watchdog setpoint')]:\n self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN\n )\n othergrid.attach(self.indicators[vn], column, row, 1, 1)\n self.update_indicators()\n\n def on_mainwidget_map(self, window):\n if super().on_mainwidget_map(window):\n return True\n self.update_indicators()\n\n def update_indicators(self):\n dev = self.instrument.get_device('haakephoenix')\n for vn in self.indicators:\n self.on_device_variable_change(dev, vn, dev.get_variable(vn))\n self.builder.get_object('setpoint_adjustment').set_value(dev.\n get_variable('setpoint'))\n self.builder.get_object('lowlimit_adjustment').set_value(dev.\n get_variable('lowlimit'))\n self.builder.get_object('highlimit_adjustment').set_value(dev.\n get_variable('highlimit'))\n\n def on_device_variable_change(self, device, variablename, newvalue):\n if variablename in ['_status', 'firmwareversion', 'fuzzycontrol',\n 'date', 'time', 'faultstatus']:\n self.indicators[variablename].set_value(str(newvalue),\n IndicatorState.NEUTRAL)\n elif variablename in ['setpoint', 'temperature', 'lowlimit',\n 'highlimit']:\n self.indicators[variablename].set_value('%.2f°C' % newvalue,\n IndicatorState.NEUTRAL)\n elif variablename in ['control_on', 'cooling_on', 'diffcontrol_on',\n 'watchdog_on', 'beep', 'fuzzyid', 'fuzzystatus', 'autostart']:\n self.indicators[variablename].set_value(['OFF', 'ON'][int(bool(\n newvalue))], [IndicatorState.ERROR, IndicatorState.OK][int(\n bool(newvalue))])\n elif variablename in ['pump_power']:\n self.indicators[variablename].set_value('%.2f %%' % newvalue, [\n IndicatorState.ERROR, IndicatorState.OK][newvalue > 0])\n elif variablename in ['external_pt100_error',\n 'internal_pt100_error', 'liquid_level_low_error',\n 'cooling_error', 'main_relay_missing_error']:\n self.indicators[variablename].set_value(['OK', 'ERROR'][int(\n bool(newvalue))], [IndicatorState.OK, IndicatorState.ERROR]\n [int(bool(newvalue))])\n elif variablename in 
['liquid_level_alarm_error',\n 'external_alarm_error', 'overtemperature_error']:\n self.indicators[variablename].set_value(['OK', 'ALARM'][int(\n bool(newvalue))], [IndicatorState.OK, IndicatorState.ERROR]\n [int(bool(newvalue))])\n elif variablename in ['pump_overload_error']:\n self.indicators[variablename].set_value(['OK', 'OVERLOAD'][int(\n bool(newvalue))], [IndicatorState.OK, IndicatorState.ERROR]\n [int(bool(newvalue))])\n elif variablename in ['watchdog_setpoint']:\n self.indicators[variablename].set_value('%.2f sec' % newvalue,\n IndicatorState.UNKNOWN)\n elif variablename in ['control_external']:\n self.indicators[variablename].set_value(['Internal', 'External'\n ][int(bool(newvalue))], IndicatorState.NEUTRAL)\n if variablename == 'fuzzyid':\n self.builder.get_object('fuzzyid_switch').set_state(bool(newvalue))\n elif variablename == 'pump_power':\n self.builder.get_object('circulator_switch').set_state(newvalue > 0\n )\n return False\n\n def on_circulator_switch_state_set(self, switch, state):\n dev = self.instrument.get_device('haakephoenix')\n if state:\n dev.execute_command('start')\n else:\n dev.execute_command('stop')\n return True\n\n def on_fuzzyid_switch_state_set(self, switch, state):\n self.instrument.get_device('haakephoenix').set_variable('fuzzyid',\n state)\n return True\n\n def on_set_setpoint(self, button):\n spinbutton = self.builder.get_object('setpoint_spin')\n self.instrument.get_device('haakephoenix').set_variable('setpoint',\n spinbutton.get_value())\n\n def on_set_lowlimit(self, button):\n spinbutton = self.builder.get_object('lowlimit_spin')\n self.instrument.get_device('haakephoenix').set_variable('lowlimit',\n spinbutton.get_value())\n\n def on_set_highlimit(self, button):\n spinbutton = self.builder.get_object('highlimit_spin')\n self.instrument.get_device('haakephoenix').set_variable('highlimit',\n spinbutton.get_value())\n\n def on_update_rtc(self, button):\n now = datetime.datetime.now()\n 
self.instrument.get_device('haakephoenix').set_variable('date', now\n .date())\n self.instrument.get_device('haakephoenix').set_variable('time', now\n .time())\n",
"step-4": "import datetime\nfrom ..core.indicator import Indicator, IndicatorState\nfrom ..core.toolwindow import ToolWindow\n\n\nclass HaakePhoenix(ToolWindow):\n required_devices = ['haakephoenix']\n\n def __init__(self, *args, **wargs):\n self.indicators = {}\n super().__init__(*args, **wargs)\n\n def init_gui(self, *args, **kwargs):\n statusgrid = self.builder.get_object('statusgrid')\n for row, column, vn, label in [(0, 0, '_status', 'Status'), (0, 1,\n 'setpoint', 'Target temperature'), (0, 2, 'temperature',\n 'Temperature'), (0, 3, 'pump_power', 'Pump speed'), (0, 4,\n 'control_on', 'Temperature control'), (1, 0, 'lowlimit',\n 'Low limit'), (1, 1, 'highlimit', 'High limit'), (1, 2,\n 'cooling_on', 'Cooling'), (1, 3, 'control_external', 'Control'),\n (1, 4, 'diffcontrol_on', 'Differential control')]:\n self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN\n )\n statusgrid.attach(self.indicators[vn], column, row, 1, 1)\n errorgrid = self.builder.get_object('errorgrid')\n for row, column, vn, label in [(0, 0, 'external_pt100_error',\n 'External Pt100'), (0, 1, 'internal_pt100_error',\n 'Internal Pt100'), (0, 2, 'liquid_level_low_error',\n 'Liquid level'), (0, 3, 'liquid_level_alarm_error',\n 'Liquid level alarm'), (0, 4, 'cooling_error', 'Cooling system'\n ), (1, 0, 'pump_overload_error', 'Pump'), (1, 1,\n 'external_alarm_error', 'External alarm'), (1, 2,\n 'overtemperature_error', 'Overtemperature'), (1, 3,\n 'main_relay_missing_error', 'Main relay'), (1, 4, 'faultstatus',\n 'Status flags')]:\n self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN\n )\n errorgrid.attach(self.indicators[vn], column, row, 1, 1)\n othergrid = self.builder.get_object('othergrid')\n for row, column, vn, label in [(0, 0, 'firmwareversion',\n 'Firmware version'), (0, 1, 'date', 'Date'), (0, 2, 'time',\n 'Time'), (0, 3, 'autostart', 'Autostart'), (0, 4, 'beep',\n 'Beep'), (1, 0, 'fuzzyid', 'Fuzzy identification'), (1, 1,\n 'fuzzycontrol', 'Fuzzy control'), 
(1, 2, 'fuzzystatus',\n 'Fuzzy status'), (1, 3, 'watchdog_on', 'Watchdog'), (1, 4,\n 'watchdog_setpoint', 'Watchdog setpoint')]:\n self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN\n )\n othergrid.attach(self.indicators[vn], column, row, 1, 1)\n self.update_indicators()\n\n def on_mainwidget_map(self, window):\n if super().on_mainwidget_map(window):\n return True\n self.update_indicators()\n\n def update_indicators(self):\n dev = self.instrument.get_device('haakephoenix')\n for vn in self.indicators:\n self.on_device_variable_change(dev, vn, dev.get_variable(vn))\n self.builder.get_object('setpoint_adjustment').set_value(dev.\n get_variable('setpoint'))\n self.builder.get_object('lowlimit_adjustment').set_value(dev.\n get_variable('lowlimit'))\n self.builder.get_object('highlimit_adjustment').set_value(dev.\n get_variable('highlimit'))\n\n def on_device_variable_change(self, device, variablename, newvalue):\n if variablename in ['_status', 'firmwareversion', 'fuzzycontrol',\n 'date', 'time', 'faultstatus']:\n self.indicators[variablename].set_value(str(newvalue),\n IndicatorState.NEUTRAL)\n elif variablename in ['setpoint', 'temperature', 'lowlimit',\n 'highlimit']:\n self.indicators[variablename].set_value('%.2f°C' % newvalue,\n IndicatorState.NEUTRAL)\n elif variablename in ['control_on', 'cooling_on', 'diffcontrol_on',\n 'watchdog_on', 'beep', 'fuzzyid', 'fuzzystatus', 'autostart']:\n self.indicators[variablename].set_value(['OFF', 'ON'][int(bool(\n newvalue))], [IndicatorState.ERROR, IndicatorState.OK][int(\n bool(newvalue))])\n elif variablename in ['pump_power']:\n self.indicators[variablename].set_value('%.2f %%' % newvalue, [\n IndicatorState.ERROR, IndicatorState.OK][newvalue > 0])\n elif variablename in ['external_pt100_error',\n 'internal_pt100_error', 'liquid_level_low_error',\n 'cooling_error', 'main_relay_missing_error']:\n self.indicators[variablename].set_value(['OK', 'ERROR'][int(\n bool(newvalue))], [IndicatorState.OK, 
IndicatorState.ERROR]\n [int(bool(newvalue))])\n elif variablename in ['liquid_level_alarm_error',\n 'external_alarm_error', 'overtemperature_error']:\n self.indicators[variablename].set_value(['OK', 'ALARM'][int(\n bool(newvalue))], [IndicatorState.OK, IndicatorState.ERROR]\n [int(bool(newvalue))])\n elif variablename in ['pump_overload_error']:\n self.indicators[variablename].set_value(['OK', 'OVERLOAD'][int(\n bool(newvalue))], [IndicatorState.OK, IndicatorState.ERROR]\n [int(bool(newvalue))])\n elif variablename in ['watchdog_setpoint']:\n self.indicators[variablename].set_value('%.2f sec' % newvalue,\n IndicatorState.UNKNOWN)\n elif variablename in ['control_external']:\n self.indicators[variablename].set_value(['Internal', 'External'\n ][int(bool(newvalue))], IndicatorState.NEUTRAL)\n if variablename == 'fuzzyid':\n self.builder.get_object('fuzzyid_switch').set_state(bool(newvalue))\n elif variablename == 'pump_power':\n self.builder.get_object('circulator_switch').set_state(newvalue > 0\n )\n return False\n\n def on_circulator_switch_state_set(self, switch, state):\n dev = self.instrument.get_device('haakephoenix')\n if state:\n dev.execute_command('start')\n else:\n dev.execute_command('stop')\n return True\n\n def on_fuzzyid_switch_state_set(self, switch, state):\n self.instrument.get_device('haakephoenix').set_variable('fuzzyid',\n state)\n return True\n\n def on_set_setpoint(self, button):\n spinbutton = self.builder.get_object('setpoint_spin')\n self.instrument.get_device('haakephoenix').set_variable('setpoint',\n spinbutton.get_value())\n\n def on_set_lowlimit(self, button):\n spinbutton = self.builder.get_object('lowlimit_spin')\n self.instrument.get_device('haakephoenix').set_variable('lowlimit',\n spinbutton.get_value())\n\n def on_set_highlimit(self, button):\n spinbutton = self.builder.get_object('highlimit_spin')\n self.instrument.get_device('haakephoenix').set_variable('highlimit',\n spinbutton.get_value())\n\n def on_update_rtc(self, button):\n 
now = datetime.datetime.now()\n self.instrument.get_device('haakephoenix').set_variable('date', now\n .date())\n self.instrument.get_device('haakephoenix').set_variable('time', now\n .time())\n",
"step-5": "import datetime\n\nfrom ..core.indicator import Indicator, IndicatorState\nfrom ..core.toolwindow import ToolWindow\n\n\nclass HaakePhoenix(ToolWindow):\n required_devices = ['haakephoenix']\n\n def __init__(self, *args, **wargs):\n self.indicators = {}\n super().__init__(*args, **wargs)\n\n def init_gui(self, *args, **kwargs):\n statusgrid = self.builder.get_object('statusgrid')\n for row, column, vn, label in [(0, 0, '_status', 'Status'),\n (0, 1, 'setpoint', 'Target temperature'),\n (0, 2, 'temperature', 'Temperature'),\n (0, 3, 'pump_power', 'Pump speed'),\n (0, 4, 'control_on', 'Temperature control'),\n (1, 0, 'lowlimit', 'Low limit'),\n (1, 1, 'highlimit', 'High limit'),\n (1, 2, 'cooling_on', 'Cooling'),\n (1, 3, 'control_external', 'Control'),\n (1, 4, 'diffcontrol_on', 'Differential control')]:\n self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN)\n statusgrid.attach(self.indicators[vn], column, row, 1, 1)\n errorgrid = self.builder.get_object('errorgrid')\n for row, column, vn, label in [(0, 0, 'external_pt100_error', 'External Pt100'), #\n (0, 1, 'internal_pt100_error', 'Internal Pt100'), #\n (0, 2, 'liquid_level_low_error', 'Liquid level'), #\n (0, 3, 'liquid_level_alarm_error', 'Liquid level alarm'), #\n (0, 4, 'cooling_error', 'Cooling system'), #\n (1, 0, 'pump_overload_error', 'Pump'), #\n (1, 1, 'external_alarm_error', 'External alarm'), #\n (1, 2, 'overtemperature_error', 'Overtemperature'), #\n (1, 3, 'main_relay_missing_error', 'Main relay'), #\n (1, 4, 'faultstatus', 'Status flags')]: #\n self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN)\n errorgrid.attach(self.indicators[vn], column, row, 1, 1)\n othergrid = self.builder.get_object('othergrid')\n for row, column, vn, label in [(0, 0, 'firmwareversion', 'Firmware version'), #\n (0, 1, 'date', 'Date'), #\n (0, 2, 'time', 'Time'), #\n (0, 3, 'autostart', 'Autostart'), #\n (0, 4, 'beep', 'Beep'), #\n (1, 0, 'fuzzyid', 'Fuzzy identification'), #\n (1, 
1, 'fuzzycontrol', 'Fuzzy control'), #\n (1, 2, 'fuzzystatus', 'Fuzzy status'), #\n (1, 3, 'watchdog_on', 'Watchdog'), #\n (1, 4, 'watchdog_setpoint', 'Watchdog setpoint')]: #\n self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN)\n othergrid.attach(self.indicators[vn], column, row, 1, 1)\n self.update_indicators()\n\n def on_mainwidget_map(self, window):\n if super().on_mainwidget_map(window):\n return True\n self.update_indicators()\n\n def update_indicators(self):\n dev = self.instrument.get_device('haakephoenix')\n for vn in self.indicators:\n self.on_device_variable_change(dev, vn, dev.get_variable(vn))\n self.builder.get_object('setpoint_adjustment').set_value(\n dev.get_variable('setpoint'))\n self.builder.get_object('lowlimit_adjustment').set_value(\n dev.get_variable('lowlimit'))\n self.builder.get_object('highlimit_adjustment').set_value(\n dev.get_variable('highlimit'))\n\n def on_device_variable_change(self, device, variablename, newvalue):\n if variablename in ['_status', 'firmwareversion', 'fuzzycontrol', 'date', 'time', 'faultstatus']:\n self.indicators[variablename].set_value(str(newvalue), IndicatorState.NEUTRAL)\n elif variablename in ['setpoint', 'temperature', 'lowlimit', 'highlimit']:\n self.indicators[variablename].set_value('%.2f°C' % newvalue, IndicatorState.NEUTRAL)\n elif variablename in ['control_on', 'cooling_on', 'diffcontrol_on', 'watchdog_on', 'beep', 'fuzzyid',\n 'fuzzystatus',\n 'autostart']:\n self.indicators[variablename].set_value(['OFF', 'ON'][int(bool(newvalue))],\n [IndicatorState.ERROR, IndicatorState.OK][int(bool(newvalue))])\n elif variablename in ['pump_power']:\n self.indicators[variablename].set_value('%.2f %%' % newvalue,\n [IndicatorState.ERROR, IndicatorState.OK][newvalue > 0])\n elif variablename in ['external_pt100_error', 'internal_pt100_error', 'liquid_level_low_error', 'cooling_error',\n 'main_relay_missing_error']:\n self.indicators[variablename].set_value(['OK', 'ERROR'][int(bool(newvalue))],\n 
[IndicatorState.OK, IndicatorState.ERROR][int(bool(newvalue))])\n elif variablename in ['liquid_level_alarm_error', 'external_alarm_error', 'overtemperature_error']:\n self.indicators[variablename].set_value(['OK', 'ALARM'][int(bool(newvalue))],\n [IndicatorState.OK, IndicatorState.ERROR][int(bool(newvalue))])\n elif variablename in ['pump_overload_error']:\n self.indicators[variablename].set_value(['OK', 'OVERLOAD'][int(bool(newvalue))],\n [IndicatorState.OK, IndicatorState.ERROR][int(bool(newvalue))])\n elif variablename in ['watchdog_setpoint']:\n self.indicators[variablename].set_value('%.2f sec' % newvalue, IndicatorState.UNKNOWN)\n elif variablename in ['control_external']:\n self.indicators[variablename].set_value(['Internal', 'External'][int(bool(newvalue))],\n IndicatorState.NEUTRAL)\n\n if variablename == 'fuzzyid':\n self.builder.get_object('fuzzyid_switch').set_state(bool(newvalue))\n elif variablename == 'pump_power':\n self.builder.get_object('circulator_switch').set_state(newvalue > 0)\n return False\n\n def on_circulator_switch_state_set(self, switch, state):\n dev = self.instrument.get_device('haakephoenix')\n if state:\n dev.execute_command('start')\n else:\n dev.execute_command('stop')\n return True\n\n def on_fuzzyid_switch_state_set(self, switch, state):\n self.instrument.get_device('haakephoenix').set_variable('fuzzyid', state)\n return True\n\n def on_set_setpoint(self, button):\n spinbutton = self.builder.get_object('setpoint_spin')\n self.instrument.get_device('haakephoenix').set_variable('setpoint', spinbutton.get_value())\n\n def on_set_lowlimit(self, button):\n spinbutton = self.builder.get_object('lowlimit_spin')\n self.instrument.get_device('haakephoenix').set_variable('lowlimit', spinbutton.get_value())\n\n def on_set_highlimit(self, button):\n spinbutton = self.builder.get_object('highlimit_spin')\n self.instrument.get_device('haakephoenix').set_variable('highlimit', spinbutton.get_value())\n\n def on_update_rtc(self, button):\n now 
= datetime.datetime.now()\n self.instrument.get_device('haakephoenix').set_variable('date', now.date())\n self.instrument.get_device('haakephoenix').set_variable('time', now.time())\n",
"step-ids": [
8,
10,
13,
14,
15
]
}
|
[
8,
10,
13,
14,
15
] |
###
### Copyright 2009 The Chicago Independent Radio Project
### All Rights Reserved.
###
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at
###
### http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
###
"""CHIRP authentication system."""
import base64
import logging
import os
import time
from common import in_prod
from common.autoretry import AutoRetry
# TODO(trow): This is a work-around for problems with PyCrypto on the Mac.
# For more information, see
#     http://code.google.com/p/googleappengine/issues/detail?id=1627
# When True, security tokens are neither encrypted nor signed; this is
# only ever permitted in a local development environment (see below).
_DISABLE_CRYPTO = False
try:
    from Crypto.Cipher import AES
    from Crypto.Hash import HMAC
except ImportError:
    # Only allow crypto to be disabled if we are running in a local
    # development environment.
    if in_prod():
        raise
    _DISABLE_CRYPTO = True
    logging.warn("PyCrypto not found! Operating in insecure mode!")
from django import http
from auth.models import User, KeyStorage
from auth import roles
# Our logout URL (served by the auth app; see logout() below).
LOGOUT_URL = "/auth/goodbye/"
# Users are ultimately redirected to this URL after logging out.
_FINAL_LOGOUT_URL = '/auth/hello/'
# The name of the cookie used to store our security token.
_CHIRP_SECURITY_TOKEN_COOKIE = 'chirp_security_token'
# Our security tokens expire after 24 hours (value is in seconds).
# TODO(kumar) set this back to two hours after
# all CHIRP volunteers have set initial password?
_TOKEN_TIMEOUT_S = 24 * 60 * 60
class UserNotAllowedError(Exception):
    """Raised when the user is recognized but forbidden from entering.

    get_current_user() raises this for accounts that exist but are
    marked inactive, as opposed to returning None for unknown users.
    """
class _Credentials(object):
    """Parsed contents of a security token."""

    # Email address encoded in the token; None until the token is parsed.
    email = None
    # True when the token is more than halfway through its lifetime
    # (older than _TOKEN_TIMEOUT_S / 2).
    security_token_is_stale = False
def _create_security_token(user):
    """Create a CHIRP security token.

    The token has the form "<hmac-hex-digest>:<body>", where the body is
    the string "<hex timestamp> <email>" encrypted with AES-CBC under the
    site key (or left as plaintext with a dummy signature when crypto is
    disabled for local development).  The HMAC is computed over the
    encrypted body, so _parse_security_token verifies before decrypting
    (encrypt-then-MAC).

    Args:
        user: A User object.

    Returns:
        A string containing an encrypted security token that encodes
        the user's email address as well as a timestamp.
    """
    timestamp = int(time.time())
    plaintext = "%x %s" % (timestamp, user.email)
    nearest_mult_of_16 = 16 * ((len(plaintext) + 15) // 16)
    # Pad plaintext with whitespace to make the length a multiple of 16,
    # as this is a requirement of AES encryption.
    plaintext = plaintext.rjust(nearest_mult_of_16, ' ')
    if _DISABLE_CRYPTO:
        # Insecure local-dev mode: plaintext body, fixed dummy signature.
        # _parse_security_token skips signature checking in this mode.
        body = plaintext
        sig = "sig"
    else:
        key_storage = KeyStorage.get()
        # NOTE(review): no IV is passed to AES.new, so the library default
        # is used and encryption is deterministic for a given
        # timestamp+email pair -- confirm this is acceptable here.
        body = AES.new(key_storage.aes_key, AES.MODE_CBC).encrypt(plaintext)
        hmac_key = key_storage.hmac_key
        if type(hmac_key) == unicode:
            # Crypto requires byte strings
            hmac_key = hmac_key.encode('utf8')
        # Sign the ciphertext so tampering is detected before decryption.
        sig = HMAC.HMAC(key=hmac_key, msg=body).hexdigest()
    return '%s:%s' % (sig, body)
def _parse_security_token(token):
    """Parse a CHIRP security token produced by _create_security_token.

    Verifies the HMAC signature, decrypts the body, and validates the
    embedded timestamp against _TOKEN_TIMEOUT_S.

    Returns:
        A _Credentials object with "email" and "security_token_is_stale"
        set, or None if the token is missing, malformed, tampered with,
        or expired.
    """
    if not token:
        return None
    if ':' not in token:
        logging.warn('Malformed token: no signature separator')
        return None
    sig, body = token.split(':', 1)
    if _DISABLE_CRYPTO:
        # Insecure local-dev mode: body is plaintext and the signature
        # is not checked.
        plaintext = body
    else:
        key_storage = KeyStorage.get()
        hmac_key = key_storage.hmac_key
        if type(hmac_key) == unicode:
            # Crypto requires byte strings
            hmac_key = hmac_key.encode('utf8')
        # Verify the signature over the ciphertext before decrypting.
        # NOTE(review): '!=' is not a constant-time comparison; consider
        # hmac.compare_digest to avoid a timing side channel.
        computed_sig = HMAC.HMAC(key=hmac_key,
                                 msg=body).hexdigest()
        if sig != computed_sig:
            logging.warn('Malformed token: invalid signature')
            return None
        try:
            plaintext = AES.new(key_storage.aes_key,
                                AES.MODE_CBC).decrypt(body)
        except ValueError:
            logging.warn('Malformed token: wrong size')
            return None
    # Remove the whitespace padding added at encryption time.
    plaintext = plaintext.strip()
    # The plaintext should be "<hex timestamp> <email>", i.e. contain
    # at least one space.
    if ' ' not in plaintext:
        logging.warn('Malformed token: bad contents')
        return None
    parts = plaintext.split(' ')
    if len(parts) != 2:
        logging.warn('Malformed token: bad structure')
        return None
    timestamp, email = parts
    try:
        timestamp = int(timestamp, 16)
    except ValueError:
        logging.warn('Malformed token: bad timestamp')
        return None
    # Reject tokens that are too old or which have time-traveled. We
    # allow for 1s of clock skew.
    age_s = time.time() - timestamp
    if age_s < -1 or age_s > _TOKEN_TIMEOUT_S:
        logging.warn('Malformed token: expired (age=%ds)', age_s)
        return None
    cred = _Credentials()
    cred.email = email
    # Past the half-life: callers may want to issue a fresh token.
    cred.security_token_is_stale = (age_s > 0.5 * _TOKEN_TIMEOUT_S)
    return cred
def attach_credentials(response, user):
    """Attach a user's credentials to a response.

    Sets the security-token cookie on the response so that subsequent
    requests from this client authenticate as the given user.

    Args:
        response: An HttpResponse object.
        user: A User object.
    """
    token = _create_security_token(user)
    response.set_cookie(_CHIRP_SECURITY_TOKEN_COOKIE, token)
def get_current_user(request):
    """Return the currently logged-in user.

    Looks for a security token first in the session cookie and, for POST
    requests, falls back to a base64-encoded token in the "CHIRP_Auth"
    form variable.

    Returns:
        A User object, or None if the user is not logged in.

    Raises:
        UserNotAllowedError if the user is prohibited from accessing
        the site.
    """
    cred = None
    cookie_token = request.COOKIES.get(_CHIRP_SECURITY_TOKEN_COOKIE)
    if cookie_token:
        cred = _parse_security_token(cookie_token)
    if cred is None and request.method == 'POST':
        # POST fallback: a base64-encoded security token in CHIRP_Auth.
        raw = request.POST.get("CHIRP_Auth")
        if raw:
            try:
                raw = base64.urlsafe_b64decode(raw)
            except TypeError:
                raw = None
        if raw:
            cred = _parse_security_token(raw)
    if cred is None:
        # No valid token from either source: not logged in.
        return None
    user = User.get_by_email(cred.email)
    if user is None:
        return None
    if not user.is_active:
        # Known but deactivated account: forbid entry outright.
        logging.info('Rejected inactive user %s', user.email)
        raise UserNotAllowedError
    user._credentials = cred
    return user
def create_login_url(path):
    """Returns the URL of a login page that redirects to 'path' on success.

    Args:
        path: The URL (typically a relative path) the user should be sent
            to after a successful login.

    Returns:
        A "/auth/hello?redirect=..." URL with the path URL-encoded.
    """
    # URL-encode the redirect target so paths containing '&', '?', '=' or
    # spaces survive the round trip through the query string.  '/' is kept
    # literal, so plain paths produce the same URL as before.
    try:
        from urllib import quote  # Python 2
    except ImportError:
        from urllib.parse import quote  # Python 3
    return "/auth/hello?redirect=%s" % quote(path, safe='/')
def logout(redirect=None):
    """Create an HTTP response that will log a user out.

    The redirect param can be a relative URL, in which case the user
    returns to the same page after logging back in.  This is useful for
    switching users, e.g. on the playlist tracker page.

    Returns:
        An HttpResponse object that will log the user out.
    """
    if redirect:
        target = '%s?redirect=%s' % (_FINAL_LOGOUT_URL, redirect)
    else:
        target = _FINAL_LOGOUT_URL
    response = http.HttpResponseRedirect(target)
    # Clear the security-token cookie so the browser forgets the session.
    response.set_cookie(_CHIRP_SECURITY_TOKEN_COOKIE, '')
    return response
def get_password_reset_token(user):
    """A URL-safe token that authenticates a user for a password reset."""
    token = _create_security_token(user)
    return base64.urlsafe_b64encode(token)
def parse_password_reset_token(token):
    """Extracts an email address from a valid password reset token."""
    try:
        raw = base64.urlsafe_b64decode(str(token))
    except TypeError:
        # Not valid base64 — the token cannot be genuine.
        return None
    cred = _parse_security_token(raw)
    return cred and cred.email
|
normal
|
{
"blob_id": "d077f32061b87a4bfd6a0ac226730957a4000804",
"index": 5859,
"step-1": "<mask token>\n\n\nclass UserNotAllowedError(Exception):\n \"\"\"Raised when the user is recognized but forbidden from entering.\"\"\"\n\n\nclass _Credentials(object):\n email = None\n security_token_is_stale = False\n\n\n<mask token>\n\n\ndef _parse_security_token(token):\n \"\"\"Parse a CHIRP security token.\n\n Returns:\n A Credentials object, or None if the token is not valid.\n If a Credentials object is returned, its \"user\" field will not\n be set.\n \"\"\"\n if not token:\n return None\n if ':' not in token:\n logging.warn('Malformed token: no signature separator')\n return None\n sig, body = token.split(':', 1)\n if _DISABLE_CRYPTO:\n plaintext = body\n else:\n key_storage = KeyStorage.get()\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n hmac_key = hmac_key.encode('utf8')\n computed_sig = HMAC.HMAC(key=hmac_key, msg=body).hexdigest()\n if sig != computed_sig:\n logging.warn('Malformed token: invalid signature')\n return None\n try:\n plaintext = AES.new(key_storage.aes_key, AES.MODE_CBC).decrypt(body\n )\n except ValueError:\n logging.warn('Malformed token: wrong size')\n return None\n plaintext = plaintext.strip()\n if ' ' not in plaintext:\n logging.warn('Malformed token: bad contents')\n return None\n parts = plaintext.split(' ')\n if len(parts) != 2:\n logging.warn('Malformed token: bad structure')\n return None\n timestamp, email = parts\n try:\n timestamp = int(timestamp, 16)\n except ValueError:\n logging.warn('Malformed token: bad timestamp')\n return None\n age_s = time.time() - timestamp\n if age_s < -1 or age_s > _TOKEN_TIMEOUT_S:\n logging.warn('Malformed token: expired (age=%ds)', age_s)\n return None\n cred = _Credentials()\n cred.email = email\n cred.security_token_is_stale = age_s > 0.5 * _TOKEN_TIMEOUT_S\n return cred\n\n\ndef attach_credentials(response, user):\n \"\"\"Attach a user's credentials to a response.\n\n Args:\n response: An HttpResponse object.\n user: A User object.\n \"\"\"\n 
response.set_cookie(_CHIRP_SECURITY_TOKEN_COOKIE,\n _create_security_token(user))\n\n\n<mask token>\n\n\ndef create_login_url(path):\n \"\"\"Returns the URL of a login page that redirects to 'path' on success.\"\"\"\n return '/auth/hello?redirect=%s' % path\n\n\ndef logout(redirect=None):\n \"\"\"Create an HTTP response that will log a user out.\n \n The redirect param can be a relative URL in which case \n the user will go back to the same page when logging in.\n This is useful for switching users like on the playlist \n tracker page.\n \n Returns:\n An HttpResponse object that will log the user out.\n \"\"\"\n logout_url = _FINAL_LOGOUT_URL\n if redirect:\n logout_url = '%s?redirect=%s' % (logout_url, redirect)\n response = http.HttpResponseRedirect(logout_url)\n response.set_cookie(_CHIRP_SECURITY_TOKEN_COOKIE, '')\n return response\n\n\ndef get_password_reset_token(user):\n \"\"\"A URL-safe token that authenticates a user for a password reset.\"\"\"\n return base64.urlsafe_b64encode(_create_security_token(user))\n\n\ndef parse_password_reset_token(token):\n \"\"\"Extracts an email address from a valid password reset token.\"\"\"\n try:\n token = base64.urlsafe_b64decode(str(token))\n except TypeError:\n return None\n cred = _parse_security_token(token)\n return cred and cred.email\n",
"step-2": "<mask token>\n\n\nclass UserNotAllowedError(Exception):\n \"\"\"Raised when the user is recognized but forbidden from entering.\"\"\"\n\n\nclass _Credentials(object):\n email = None\n security_token_is_stale = False\n\n\ndef _create_security_token(user):\n \"\"\"Create a CHIRP security token.\n\n Args:\n user: A User object.\n\n Returns:\n A string containing an encrypted security token that encodes\n the user's email address as well as a timestamp.\n \"\"\"\n timestamp = int(time.time())\n plaintext = '%x %s' % (timestamp, user.email)\n nearest_mult_of_16 = 16 * ((len(plaintext) + 15) // 16)\n plaintext = plaintext.rjust(nearest_mult_of_16, ' ')\n if _DISABLE_CRYPTO:\n body = plaintext\n sig = 'sig'\n else:\n key_storage = KeyStorage.get()\n body = AES.new(key_storage.aes_key, AES.MODE_CBC).encrypt(plaintext)\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n hmac_key = hmac_key.encode('utf8')\n sig = HMAC.HMAC(key=hmac_key, msg=body).hexdigest()\n return '%s:%s' % (sig, body)\n\n\ndef _parse_security_token(token):\n \"\"\"Parse a CHIRP security token.\n\n Returns:\n A Credentials object, or None if the token is not valid.\n If a Credentials object is returned, its \"user\" field will not\n be set.\n \"\"\"\n if not token:\n return None\n if ':' not in token:\n logging.warn('Malformed token: no signature separator')\n return None\n sig, body = token.split(':', 1)\n if _DISABLE_CRYPTO:\n plaintext = body\n else:\n key_storage = KeyStorage.get()\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n hmac_key = hmac_key.encode('utf8')\n computed_sig = HMAC.HMAC(key=hmac_key, msg=body).hexdigest()\n if sig != computed_sig:\n logging.warn('Malformed token: invalid signature')\n return None\n try:\n plaintext = AES.new(key_storage.aes_key, AES.MODE_CBC).decrypt(body\n )\n except ValueError:\n logging.warn('Malformed token: wrong size')\n return None\n plaintext = plaintext.strip()\n if ' ' not in plaintext:\n 
logging.warn('Malformed token: bad contents')\n return None\n parts = plaintext.split(' ')\n if len(parts) != 2:\n logging.warn('Malformed token: bad structure')\n return None\n timestamp, email = parts\n try:\n timestamp = int(timestamp, 16)\n except ValueError:\n logging.warn('Malformed token: bad timestamp')\n return None\n age_s = time.time() - timestamp\n if age_s < -1 or age_s > _TOKEN_TIMEOUT_S:\n logging.warn('Malformed token: expired (age=%ds)', age_s)\n return None\n cred = _Credentials()\n cred.email = email\n cred.security_token_is_stale = age_s > 0.5 * _TOKEN_TIMEOUT_S\n return cred\n\n\ndef attach_credentials(response, user):\n \"\"\"Attach a user's credentials to a response.\n\n Args:\n response: An HttpResponse object.\n user: A User object.\n \"\"\"\n response.set_cookie(_CHIRP_SECURITY_TOKEN_COOKIE,\n _create_security_token(user))\n\n\n<mask token>\n\n\ndef create_login_url(path):\n \"\"\"Returns the URL of a login page that redirects to 'path' on success.\"\"\"\n return '/auth/hello?redirect=%s' % path\n\n\ndef logout(redirect=None):\n \"\"\"Create an HTTP response that will log a user out.\n \n The redirect param can be a relative URL in which case \n the user will go back to the same page when logging in.\n This is useful for switching users like on the playlist \n tracker page.\n \n Returns:\n An HttpResponse object that will log the user out.\n \"\"\"\n logout_url = _FINAL_LOGOUT_URL\n if redirect:\n logout_url = '%s?redirect=%s' % (logout_url, redirect)\n response = http.HttpResponseRedirect(logout_url)\n response.set_cookie(_CHIRP_SECURITY_TOKEN_COOKIE, '')\n return response\n\n\ndef get_password_reset_token(user):\n \"\"\"A URL-safe token that authenticates a user for a password reset.\"\"\"\n return base64.urlsafe_b64encode(_create_security_token(user))\n\n\ndef parse_password_reset_token(token):\n \"\"\"Extracts an email address from a valid password reset token.\"\"\"\n try:\n token = base64.urlsafe_b64decode(str(token))\n except 
TypeError:\n return None\n cred = _parse_security_token(token)\n return cred and cred.email\n",
"step-3": "<mask token>\n\n\nclass UserNotAllowedError(Exception):\n \"\"\"Raised when the user is recognized but forbidden from entering.\"\"\"\n\n\nclass _Credentials(object):\n email = None\n security_token_is_stale = False\n\n\ndef _create_security_token(user):\n \"\"\"Create a CHIRP security token.\n\n Args:\n user: A User object.\n\n Returns:\n A string containing an encrypted security token that encodes\n the user's email address as well as a timestamp.\n \"\"\"\n timestamp = int(time.time())\n plaintext = '%x %s' % (timestamp, user.email)\n nearest_mult_of_16 = 16 * ((len(plaintext) + 15) // 16)\n plaintext = plaintext.rjust(nearest_mult_of_16, ' ')\n if _DISABLE_CRYPTO:\n body = plaintext\n sig = 'sig'\n else:\n key_storage = KeyStorage.get()\n body = AES.new(key_storage.aes_key, AES.MODE_CBC).encrypt(plaintext)\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n hmac_key = hmac_key.encode('utf8')\n sig = HMAC.HMAC(key=hmac_key, msg=body).hexdigest()\n return '%s:%s' % (sig, body)\n\n\ndef _parse_security_token(token):\n \"\"\"Parse a CHIRP security token.\n\n Returns:\n A Credentials object, or None if the token is not valid.\n If a Credentials object is returned, its \"user\" field will not\n be set.\n \"\"\"\n if not token:\n return None\n if ':' not in token:\n logging.warn('Malformed token: no signature separator')\n return None\n sig, body = token.split(':', 1)\n if _DISABLE_CRYPTO:\n plaintext = body\n else:\n key_storage = KeyStorage.get()\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n hmac_key = hmac_key.encode('utf8')\n computed_sig = HMAC.HMAC(key=hmac_key, msg=body).hexdigest()\n if sig != computed_sig:\n logging.warn('Malformed token: invalid signature')\n return None\n try:\n plaintext = AES.new(key_storage.aes_key, AES.MODE_CBC).decrypt(body\n )\n except ValueError:\n logging.warn('Malformed token: wrong size')\n return None\n plaintext = plaintext.strip()\n if ' ' not in plaintext:\n 
logging.warn('Malformed token: bad contents')\n return None\n parts = plaintext.split(' ')\n if len(parts) != 2:\n logging.warn('Malformed token: bad structure')\n return None\n timestamp, email = parts\n try:\n timestamp = int(timestamp, 16)\n except ValueError:\n logging.warn('Malformed token: bad timestamp')\n return None\n age_s = time.time() - timestamp\n if age_s < -1 or age_s > _TOKEN_TIMEOUT_S:\n logging.warn('Malformed token: expired (age=%ds)', age_s)\n return None\n cred = _Credentials()\n cred.email = email\n cred.security_token_is_stale = age_s > 0.5 * _TOKEN_TIMEOUT_S\n return cred\n\n\ndef attach_credentials(response, user):\n \"\"\"Attach a user's credentials to a response.\n\n Args:\n response: An HttpResponse object.\n user: A User object.\n \"\"\"\n response.set_cookie(_CHIRP_SECURITY_TOKEN_COOKIE,\n _create_security_token(user))\n\n\ndef get_current_user(request):\n \"\"\"Get the current logged-in user's.\n\n Returns:\n A User object, or None if the user is not logged in.\n\n Raises:\n UserNotAllowedError if the user is prohibited from accessing\n the site.\n \"\"\"\n cred = None\n token = request.COOKIES.get(_CHIRP_SECURITY_TOKEN_COOKIE)\n if token:\n cred = _parse_security_token(token)\n if cred is None and request.method == 'POST':\n token = request.POST.get('CHIRP_Auth')\n if token:\n try:\n token = base64.urlsafe_b64decode(token)\n except TypeError:\n token = None\n if token:\n cred = _parse_security_token(token)\n if cred is None:\n return None\n user = User.get_by_email(cred.email)\n if user is None:\n return None\n if not user.is_active:\n logging.info('Rejected inactive user %s', user.email)\n raise UserNotAllowedError\n user._credentials = cred\n return user\n\n\ndef create_login_url(path):\n \"\"\"Returns the URL of a login page that redirects to 'path' on success.\"\"\"\n return '/auth/hello?redirect=%s' % path\n\n\ndef logout(redirect=None):\n \"\"\"Create an HTTP response that will log a user out.\n \n The redirect param can be a 
relative URL in which case \n the user will go back to the same page when logging in.\n This is useful for switching users like on the playlist \n tracker page.\n \n Returns:\n An HttpResponse object that will log the user out.\n \"\"\"\n logout_url = _FINAL_LOGOUT_URL\n if redirect:\n logout_url = '%s?redirect=%s' % (logout_url, redirect)\n response = http.HttpResponseRedirect(logout_url)\n response.set_cookie(_CHIRP_SECURITY_TOKEN_COOKIE, '')\n return response\n\n\ndef get_password_reset_token(user):\n \"\"\"A URL-safe token that authenticates a user for a password reset.\"\"\"\n return base64.urlsafe_b64encode(_create_security_token(user))\n\n\ndef parse_password_reset_token(token):\n \"\"\"Extracts an email address from a valid password reset token.\"\"\"\n try:\n token = base64.urlsafe_b64decode(str(token))\n except TypeError:\n return None\n cred = _parse_security_token(token)\n return cred and cred.email\n",
"step-4": "<mask token>\ntry:\n from Crypto.Cipher import AES\n from Crypto.Hash import HMAC\nexcept ImportError:\n if in_prod():\n raise\n _DISABLE_CRYPTO = True\n logging.warn('PyCrypto not found! Operating in insecure mode!')\n<mask token>\n\n\nclass UserNotAllowedError(Exception):\n \"\"\"Raised when the user is recognized but forbidden from entering.\"\"\"\n\n\nclass _Credentials(object):\n email = None\n security_token_is_stale = False\n\n\ndef _create_security_token(user):\n \"\"\"Create a CHIRP security token.\n\n Args:\n user: A User object.\n\n Returns:\n A string containing an encrypted security token that encodes\n the user's email address as well as a timestamp.\n \"\"\"\n timestamp = int(time.time())\n plaintext = '%x %s' % (timestamp, user.email)\n nearest_mult_of_16 = 16 * ((len(plaintext) + 15) // 16)\n plaintext = plaintext.rjust(nearest_mult_of_16, ' ')\n if _DISABLE_CRYPTO:\n body = plaintext\n sig = 'sig'\n else:\n key_storage = KeyStorage.get()\n body = AES.new(key_storage.aes_key, AES.MODE_CBC).encrypt(plaintext)\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n hmac_key = hmac_key.encode('utf8')\n sig = HMAC.HMAC(key=hmac_key, msg=body).hexdigest()\n return '%s:%s' % (sig, body)\n\n\ndef _parse_security_token(token):\n \"\"\"Parse a CHIRP security token.\n\n Returns:\n A Credentials object, or None if the token is not valid.\n If a Credentials object is returned, its \"user\" field will not\n be set.\n \"\"\"\n if not token:\n return None\n if ':' not in token:\n logging.warn('Malformed token: no signature separator')\n return None\n sig, body = token.split(':', 1)\n if _DISABLE_CRYPTO:\n plaintext = body\n else:\n key_storage = KeyStorage.get()\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n hmac_key = hmac_key.encode('utf8')\n computed_sig = HMAC.HMAC(key=hmac_key, msg=body).hexdigest()\n if sig != computed_sig:\n logging.warn('Malformed token: invalid signature')\n return None\n try:\n plaintext = 
AES.new(key_storage.aes_key, AES.MODE_CBC).decrypt(body\n )\n except ValueError:\n logging.warn('Malformed token: wrong size')\n return None\n plaintext = plaintext.strip()\n if ' ' not in plaintext:\n logging.warn('Malformed token: bad contents')\n return None\n parts = plaintext.split(' ')\n if len(parts) != 2:\n logging.warn('Malformed token: bad structure')\n return None\n timestamp, email = parts\n try:\n timestamp = int(timestamp, 16)\n except ValueError:\n logging.warn('Malformed token: bad timestamp')\n return None\n age_s = time.time() - timestamp\n if age_s < -1 or age_s > _TOKEN_TIMEOUT_S:\n logging.warn('Malformed token: expired (age=%ds)', age_s)\n return None\n cred = _Credentials()\n cred.email = email\n cred.security_token_is_stale = age_s > 0.5 * _TOKEN_TIMEOUT_S\n return cred\n\n\ndef attach_credentials(response, user):\n \"\"\"Attach a user's credentials to a response.\n\n Args:\n response: An HttpResponse object.\n user: A User object.\n \"\"\"\n response.set_cookie(_CHIRP_SECURITY_TOKEN_COOKIE,\n _create_security_token(user))\n\n\ndef get_current_user(request):\n \"\"\"Get the current logged-in user's.\n\n Returns:\n A User object, or None if the user is not logged in.\n\n Raises:\n UserNotAllowedError if the user is prohibited from accessing\n the site.\n \"\"\"\n cred = None\n token = request.COOKIES.get(_CHIRP_SECURITY_TOKEN_COOKIE)\n if token:\n cred = _parse_security_token(token)\n if cred is None and request.method == 'POST':\n token = request.POST.get('CHIRP_Auth')\n if token:\n try:\n token = base64.urlsafe_b64decode(token)\n except TypeError:\n token = None\n if token:\n cred = _parse_security_token(token)\n if cred is None:\n return None\n user = User.get_by_email(cred.email)\n if user is None:\n return None\n if not user.is_active:\n logging.info('Rejected inactive user %s', user.email)\n raise UserNotAllowedError\n user._credentials = cred\n return user\n\n\ndef create_login_url(path):\n \"\"\"Returns the URL of a login page that 
redirects to 'path' on success.\"\"\"\n return '/auth/hello?redirect=%s' % path\n\n\ndef logout(redirect=None):\n \"\"\"Create an HTTP response that will log a user out.\n \n The redirect param can be a relative URL in which case \n the user will go back to the same page when logging in.\n This is useful for switching users like on the playlist \n tracker page.\n \n Returns:\n An HttpResponse object that will log the user out.\n \"\"\"\n logout_url = _FINAL_LOGOUT_URL\n if redirect:\n logout_url = '%s?redirect=%s' % (logout_url, redirect)\n response = http.HttpResponseRedirect(logout_url)\n response.set_cookie(_CHIRP_SECURITY_TOKEN_COOKIE, '')\n return response\n\n\ndef get_password_reset_token(user):\n \"\"\"A URL-safe token that authenticates a user for a password reset.\"\"\"\n return base64.urlsafe_b64encode(_create_security_token(user))\n\n\ndef parse_password_reset_token(token):\n \"\"\"Extracts an email address from a valid password reset token.\"\"\"\n try:\n token = base64.urlsafe_b64decode(str(token))\n except TypeError:\n return None\n cred = _parse_security_token(token)\n return cred and cred.email\n",
"step-5": "###\n### Copyright 2009 The Chicago Independent Radio Project\n### All Rights Reserved.\n###\n### Licensed under the Apache License, Version 2.0 (the \"License\");\n### you may not use this file except in compliance with the License.\n### You may obtain a copy of the License at\n###\n### http://www.apache.org/licenses/LICENSE-2.0\n###\n### Unless required by applicable law or agreed to in writing, software\n### distributed under the License is distributed on an \"AS IS\" BASIS,\n### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n### See the License for the specific language governing permissions and\n### limitations under the License.\n###\n\n\"\"\"CHIRP authentication system.\"\"\"\n\nimport base64\nimport logging\nimport os\nimport time\n\nfrom common import in_prod\nfrom common.autoretry import AutoRetry\n\n# TODO(trow): This is a work-around for problems with PyCrypto on the Mac.\n# For more information, see\n# http://code.google.com/p/googleappengine/issues/detail?id=1627\n_DISABLE_CRYPTO = False\ntry:\n from Crypto.Cipher import AES\n from Crypto.Hash import HMAC\nexcept ImportError:\n # Only allow crypto to be disabled if we are running in a local\n # development environment.\n if in_prod():\n raise\n _DISABLE_CRYPTO = True\n logging.warn(\"PyCrypto not found! 
Operating in insecure mode!\")\n \nfrom django import http\nfrom auth.models import User, KeyStorage\nfrom auth import roles\n\n# Our logout URL.\nLOGOUT_URL = \"/auth/goodbye/\"\n\n# Users are ultimately redirected to the URL after logging out.\n_FINAL_LOGOUT_URL = '/auth/hello/'\n\n# The name of the cookie used to store our security token.\n_CHIRP_SECURITY_TOKEN_COOKIE = 'chirp_security_token'\n\n# Our security tokens expire after 24 hours.\n# TODO(kumar) set this back to two hours after \n# all CHIRP volunteers have set initial password?\n_TOKEN_TIMEOUT_S = 24 * 60 * 60\n\n\nclass UserNotAllowedError(Exception):\n \"\"\"Raised when the user is recognized but forbidden from entering.\"\"\"\n\n\nclass _Credentials(object):\n email = None\n security_token_is_stale = False\n\n\ndef _create_security_token(user):\n \"\"\"Create a CHIRP security token.\n\n Args:\n user: A User object.\n\n Returns:\n A string containing an encrypted security token that encodes\n the user's email address as well as a timestamp.\n \"\"\"\n timestamp = int(time.time())\n plaintext = \"%x %s\" % (timestamp, user.email)\n nearest_mult_of_16 = 16 * ((len(plaintext) + 15) // 16)\n # Pad plaintest with whitespace to make the length a multiple of 16,\n # as this is a requirement of AES encryption.\n plaintext = plaintext.rjust(nearest_mult_of_16, ' ')\n if _DISABLE_CRYPTO:\n body = plaintext\n sig = \"sig\"\n else:\n key_storage = KeyStorage.get()\n body = AES.new(key_storage.aes_key, AES.MODE_CBC).encrypt(plaintext)\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n # Crypto requires byte strings\n hmac_key = hmac_key.encode('utf8')\n sig = HMAC.HMAC(key=hmac_key, msg=body).hexdigest()\n return '%s:%s' % (sig, body)\n\ndef _parse_security_token(token):\n \"\"\"Parse a CHIRP security token.\n\n Returns:\n A Credentials object, or None if the token is not valid.\n If a Credentials object is returned, its \"user\" field will not\n be set.\n \"\"\"\n if not token:\n return None\n 
if ':' not in token:\n logging.warn('Malformed token: no signature separator')\n return None\n sig, body = token.split(':', 1)\n if _DISABLE_CRYPTO:\n plaintext = body\n else:\n key_storage = KeyStorage.get()\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n # Crypto requires byte strings\n hmac_key = hmac_key.encode('utf8')\n computed_sig = HMAC.HMAC(key=hmac_key,\n msg=body).hexdigest()\n if sig != computed_sig:\n logging.warn('Malformed token: invalid signature')\n return None\n try:\n plaintext = AES.new(key_storage.aes_key,\n AES.MODE_CBC).decrypt(body)\n except ValueError:\n logging.warn('Malformed token: wrong size')\n return None\n # Remove excess whitespace.\n plaintext = plaintext.strip()\n # The plaintext should contain at least one space.\n if ' ' not in plaintext:\n logging.warn('Malformed token: bad contents')\n return None\n parts = plaintext.split(' ')\n if len(parts) != 2:\n logging.warn('Malformed token: bad structure')\n return None\n timestamp, email = parts\n try:\n timestamp = int(timestamp, 16)\n except ValueError:\n logging.warn('Malformed token: bad timestamp')\n return None\n # Reject tokens that are too old or which have time-traveled. 
We\n # allow for 1s of clock skew.\n age_s = time.time() - timestamp\n if age_s < -1 or age_s > _TOKEN_TIMEOUT_S:\n logging.warn('Malformed token: expired (age=%ds)', age_s)\n return None\n cred = _Credentials()\n cred.email = email\n cred.security_token_is_stale = (age_s > 0.5 * _TOKEN_TIMEOUT_S)\n return cred\n\n\ndef attach_credentials(response, user):\n \"\"\"Attach a user's credentials to a response.\n\n Args:\n response: An HttpResponse object.\n user: A User object.\n \"\"\"\n response.set_cookie(_CHIRP_SECURITY_TOKEN_COOKIE,\n _create_security_token(user))\n\n\ndef get_current_user(request):\n \"\"\"Get the current logged-in user's.\n\n Returns:\n A User object, or None if the user is not logged in.\n\n Raises:\n UserNotAllowedError if the user is prohibited from accessing\n the site.\n \"\"\"\n cred = None\n token = request.COOKIES.get(_CHIRP_SECURITY_TOKEN_COOKIE)\n if token:\n cred = _parse_security_token(token)\n # If this is a POST, look for a base64-encoded security token in\n # the CHIRP_Auth variable.\n if cred is None and request.method == 'POST':\n token = request.POST.get(\"CHIRP_Auth\")\n if token:\n try:\n token = base64.urlsafe_b64decode(token)\n except TypeError:\n token = None\n if token:\n cred = _parse_security_token(token)\n # No valid token? 
This is hopeless!\n if cred is None:\n return None\n # Try to find a user for this email address.\n user = User.get_by_email(cred.email)\n if user is None:\n return None\n # Reject inactive users.\n if not user.is_active:\n logging.info('Rejected inactive user %s', user.email)\n raise UserNotAllowedError\n user._credentials = cred\n return user\n\n\ndef create_login_url(path):\n \"\"\"Returns the URL of a login page that redirects to 'path' on success.\"\"\"\n return \"/auth/hello?redirect=%s\" % path\n\n\ndef logout(redirect=None):\n \"\"\"Create an HTTP response that will log a user out.\n \n The redirect param can be a relative URL in which case \n the user will go back to the same page when logging in.\n This is useful for switching users like on the playlist \n tracker page.\n \n Returns:\n An HttpResponse object that will log the user out.\n \"\"\"\n # If the user was signed in and has a cookie, clear it.\n logout_url = _FINAL_LOGOUT_URL\n if redirect:\n logout_url = '%s?redirect=%s' % (logout_url, redirect)\n response = http.HttpResponseRedirect(logout_url)\n response.set_cookie(_CHIRP_SECURITY_TOKEN_COOKIE, '')\n return response\n\n\ndef get_password_reset_token(user):\n \"\"\"A URL-safe token that authenticates a user for a password reset.\"\"\"\n return base64.urlsafe_b64encode(_create_security_token(user))\n\n\ndef parse_password_reset_token(token):\n \"\"\"Extracts an email address from a valid password reset token.\"\"\"\n try:\n token = base64.urlsafe_b64decode(str(token))\n except TypeError:\n return None\n cred = _parse_security_token(token)\n return cred and cred.email\n",
"step-ids": [
10,
11,
12,
13,
16
]
}
|
[
10,
11,
12,
13,
16
] |
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from matplotlib import pyplot as plt
def plot_feature_VS_Observed(feature, df, linecolor):
    """Plot the 1880-2004 time series of a feature against the observed
    earth temperature.

    :param feature: name of the dataframe column to compare with the
        observed earth temperature
    :param df: pre-processed dataframe holding 'Year', 'Observed' and
        the feature columns
    :param linecolor: line color used for the feature trace
    """
    assert isinstance(df, pd.DataFrame)
    assert isinstance(feature, str)
    assert isinstance(linecolor, str)

    fig = go.Figure()
    # Feature trace, fully opaque; observed temperatures drawn dimmed
    # behind it for visual comparison.
    fig.add_trace(go.Scatter(x=df['Year'], y=df[feature], name=feature,
                             line_color=linecolor, opacity=1))
    fig.add_trace(go.Scatter(x=df['Year'], y=df['Observed'],
                             name="Observed", line_color='dimgray',
                             opacity=0.5))
    fig.update_layout(plot_bgcolor='rgba(0, 0, 0,0)',
                      xaxis_title="1880- 2005",
                      yaxis_title="Average Temp (K)",
                      title_text=feature + " vs Observed",
                      showlegend=True)
    fig.show()
|
normal
|
{
"blob_id": "8348d353e6fdea77c9c994d541db1420ef57a797",
"index": 4399,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot_feature_VS_Observed(feature, df, linecolor):\n \"\"\"\n This function plots the 1880-2004 time series plots for the selected feature and observed earth\n :param\n Input: df -- > The dataframe of each of the features,processed before\n feature --> The feature to compare with observed earth temperature\n linecolor --> The line color for this feature\n Output : the plot of feaure compared with observed earth temperature\n \"\"\"\n assert isinstance(df, pd.DataFrame)\n assert isinstance(feature, str)\n assert isinstance(linecolor, str)\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=df['Year'], y=df[feature], name=feature,\n line_color=linecolor, opacity=1))\n fig.add_trace(go.Scatter(x=df['Year'], y=df['Observed'], name=\n 'Observed', line_color='dimgray', opacity=0.5))\n fig.update_layout(plot_bgcolor='rgba(0, 0, 0,0)', xaxis_title=\n '1880- 2005', yaxis_title='Average Temp (K)', title_text=feature +\n ' vs Observed', showlegend=True)\n fig.show()\n",
"step-3": "import numpy as np\nimport pandas as pd\nimport plotly.graph_objects as go\nfrom matplotlib import pyplot as plt\n\n\ndef plot_feature_VS_Observed(feature, df, linecolor):\n \"\"\"\n This function plots the 1880-2004 time series plots for the selected feature and observed earth\n :param\n Input: df -- > The dataframe of each of the features,processed before\n feature --> The feature to compare with observed earth temperature\n linecolor --> The line color for this feature\n Output : the plot of feaure compared with observed earth temperature\n \"\"\"\n assert isinstance(df, pd.DataFrame)\n assert isinstance(feature, str)\n assert isinstance(linecolor, str)\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=df['Year'], y=df[feature], name=feature,\n line_color=linecolor, opacity=1))\n fig.add_trace(go.Scatter(x=df['Year'], y=df['Observed'], name=\n 'Observed', line_color='dimgray', opacity=0.5))\n fig.update_layout(plot_bgcolor='rgba(0, 0, 0,0)', xaxis_title=\n '1880- 2005', yaxis_title='Average Temp (K)', title_text=feature +\n ' vs Observed', showlegend=True)\n fig.show()\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport plotly.graph_objects as go\nfrom matplotlib import pyplot as plt\n\ndef plot_feature_VS_Observed(feature, df, linecolor):\n \"\"\"\n This function plots the 1880-2004 time series plots for the selected feature and observed earth\n :param\n Input: df -- > The dataframe of each of the features,processed before\n feature --> The feature to compare with observed earth temperature\n linecolor --> The line color for this feature\n Output : the plot of feaure compared with observed earth temperature\n \"\"\"\n assert isinstance(df,pd.DataFrame)\n assert isinstance(feature,str)\n assert isinstance(linecolor,str)\n \n \n fig = go.Figure()\n \n fig.add_trace(go.Scatter(\n x=df['Year'],\n y=df[feature],\n name=feature,\n line_color=linecolor,\n opacity=1))\n \n fig.add_trace(go.Scatter(\n x=df['Year'],\n y=df['Observed'],\n name=\"Observed\",\n line_color='dimgray',\n opacity=0.5) )\n \n # Use date string to set xaxis range\n fig.update_layout(plot_bgcolor='rgba(0, 0, 0,0)',\n xaxis_title=\"1880- 2005\",\n yaxis_title=\"Average Temp (K)\",\n title_text= feature + \" vs Observed\",\n showlegend=True)\n \n fig.show()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(12):
if 'r' in input():
c += 1
print(c)
<|reserved_special_token_1|>
# Read 12 lines from stdin and print how many contain the letter 'r'.
print(sum('r' in input() for _ in range(12)))
<|reserved_special_token_1|>
# Count how many of the 12 input lines contain the letter 'r'.
c=0

for i in range(12):
    if 'r' in input():
        c+=1

# Print the final count.
print(c)
|
flexible
|
{
"blob_id": "294b0dc7587ecd37887591da5a1afe96a4349f6b",
"index": 8711,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(12):\n if 'r' in input():\n c += 1\nprint(c)\n",
"step-3": "c = 0\nfor i in range(12):\n if 'r' in input():\n c += 1\nprint(c)\n",
"step-4": "# ?????\r\nc=0\r\n\r\nfor i in range(12):\r\n if 'r' in input():\r\n c+=1\r\n\r\n# ??\r\nprint(c)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Load the index time series and draw HCHFI together with the rescaled
# SSE Composite and Hushen300 indices on one axis.
data = pd.read_excel("data_SHA.xls")
fig, ax = plt.subplots()

# (column, divisor or None, legend label); divisors rescale the other
# indices to HCHFI's level for visual comparison.
series_specs = [
    ("HCHFI", None, "HCHFI"),
    ("SHA", 2.67547, "SSE Composite Index"),
    ("Hushen300 Index", 3.20393, "Hushen300 Index"),
]
for column, divisor, label in series_specs:
    values = data[column] if divisor is None else data[column] / divisor
    ax.plot(data["Date"], values, label=label)

plt.xlabel("Time/year")
plt.ylabel("Index Point")
plt.title("Comparison of HCHFI,HS300 and SSE Composite Index")
plt.legend(loc='upper right')
plt.ylim(0, 7000)
plt.show()
|
normal
|
{
"blob_id": "91df15d6d89d070677704572d35218558317a6ec",
"index": 117,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nax.plot(data['Date'], data['HCHFI'], label='HCHFI')\nax.plot(data['Date'], data['SHA'] / 2.67547, label='SSE Composite Index')\nax.plot(data['Date'], data['Hushen300 Index'] / 3.20393, label=\n 'Hushen300 Index')\nplt.xlabel('Time/year')\nplt.ylabel('Index Point')\nplt.title('Comparison of HCHFI,HS300 and SSE Composite Index')\nplt.legend(loc='upper right')\nplt.ylim(0, 7000)\nplt.show()\n",
"step-3": "<mask token>\ndata = pd.read_excel('data_SHA.xls')\nfig, ax = plt.subplots()\nax.plot(data['Date'], data['HCHFI'], label='HCHFI')\nax.plot(data['Date'], data['SHA'] / 2.67547, label='SSE Composite Index')\nax.plot(data['Date'], data['Hushen300 Index'] / 3.20393, label=\n 'Hushen300 Index')\nplt.xlabel('Time/year')\nplt.ylabel('Index Point')\nplt.title('Comparison of HCHFI,HS300 and SSE Composite Index')\nplt.legend(loc='upper right')\nplt.ylim(0, 7000)\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\ndata = pd.read_excel('data_SHA.xls')\nfig, ax = plt.subplots()\nax.plot(data['Date'], data['HCHFI'], label='HCHFI')\nax.plot(data['Date'], data['SHA'] / 2.67547, label='SSE Composite Index')\nax.plot(data['Date'], data['Hushen300 Index'] / 3.20393, label=\n 'Hushen300 Index')\nplt.xlabel('Time/year')\nplt.ylabel('Index Point')\nplt.title('Comparison of HCHFI,HS300 and SSE Composite Index')\nplt.legend(loc='upper right')\nplt.ylim(0, 7000)\nplt.show()\n",
"step-5": "import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\ndata=pd.read_excel(\"data_SHA.xls\")\nfig,ax=plt.subplots()\nax.plot(data[\"Date\"],data[\"HCHFI\"],label=\"HCHFI\")\nax.plot(data[\"Date\"],data[\"SHA\"]/2.67547,label=\"SSE Composite Index\")\nax.plot(data[\"Date\"],data[\"Hushen300 Index\"]/3.20393,label=\"Hushen300 Index\")\nplt.xlabel(\"Time/year\")\nplt.ylabel(\"Index Point\")\nplt.title(\"Comparison of HCHFI,HS300 and SSE Composite Index\")\nplt.legend(loc='upper right')\nplt.ylim(0,7000)\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def isChar(c):
return c > 'a' and c < 'z' or c > 'A' and c < 'Z'
def isOperator(c):
return c in operators
def isDefun(line):
return '(' in line and ')' in line and sum([(i in line) for i in toDelete])
def isDefStruct(line):
return 'struct ' in line and len(line.split(' ')) == 2
def isUseStruct(line):
return 'struct ' in line and len(line.split(' ')) == 3
<|reserved_special_token_0|>
def isPoint(line):
index = line.index('*') if '*' in line else -1
return index != -1 and len(line) > index + 1 and isChar(line[index + 1]
) and (sum([line.startswith(i) for i in types]) or '=' in line)
def isList(line):
return sum([line.startswith(i) for i in types]
) and '[' in line and ']' in line
<|reserved_special_token_0|>
def parseVar(s, start=0):
tmp = ''
while start < len(s):
if isChar(s[start]):
tmp += s[start]
elif isDigit(s[start]) and len(tmp):
break
start += 1
return tmp, start - len(tmp)
def parseOperator(s, start=0):
tmp = ''
while start < len(s):
if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':
tmp += s[start]
elif len(tmp) and isOperator(tmp):
return tmp, start - len(tmp)
else:
tmp = ''
start += 1
<|reserved_special_token_0|>
def main2(filename, output=None):
with open(filename, 'r') as f:
lines = f.readlines()
if not output:
output = filename + '.py'
f = open(output, 'w')
rst = []
for line in lines:
line = line.lstrip(' ').rstrip(';\n')
if line.startswith('#'):
continue
f.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def isChar(c):
return c > 'a' and c < 'z' or c > 'A' and c < 'Z'
def isOperator(c):
return c in operators
def isDefun(line):
return '(' in line and ')' in line and sum([(i in line) for i in toDelete])
def isDefStruct(line):
return 'struct ' in line and len(line.split(' ')) == 2
def isUseStruct(line):
return 'struct ' in line and len(line.split(' ')) == 3
<|reserved_special_token_0|>
def isPoint(line):
index = line.index('*') if '*' in line else -1
return index != -1 and len(line) > index + 1 and isChar(line[index + 1]
) and (sum([line.startswith(i) for i in types]) or '=' in line)
def isList(line):
return sum([line.startswith(i) for i in types]
) and '[' in line and ']' in line
def parseInt(s, start=0):
tmp = ''
while start < len(s):
if isDigit(s[start]):
tmp += s[start]
elif len(tmp):
break
start += 1
return int(tmp), start - len(tmp)
def parseVar(s, start=0):
tmp = ''
while start < len(s):
if isChar(s[start]):
tmp += s[start]
elif isDigit(s[start]) and len(tmp):
break
start += 1
return tmp, start - len(tmp)
def parseOperator(s, start=0):
tmp = ''
while start < len(s):
if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':
tmp += s[start]
elif len(tmp) and isOperator(tmp):
return tmp, start - len(tmp)
else:
tmp = ''
start += 1
def main1(filename, output=None):
with open(filename, 'r') as f:
lines = f.readlines()
if not output:
output = filename + '.py'
f = open(output, 'w')
indent = ''
instruct = False
inFor = ''
for line in lines:
line = line.lstrip(' ').rstrip(';\n')
if line.startswith('#'):
continue
if '{' in line:
if instruct:
f.write(indent + '{\n')
indent += ' '
elif '}' in line:
if inFor:
f.write('%s%s\n' % (indent, inFor))
inFor = ''
indent = indent[:-4]
if instruct:
instruct = False
f.write(indent + '}\n')
else:
s = indent
if line.startswith('//'):
s += '{}'
elif isDefun(line):
s += 'def {}:'
elif isUseStruct(line):
l = line.split(' ')[1:]
s += '{} = [{}.copy() for i in range({})]'.format(l[1][:l[1
].index('[')], l[0], parseInt(l[1], l[1].index('['))[0])
s += '{}'
line = ''
elif isDefStruct(line):
s += '{} = \\'
instruct = True
elif 'if' in line or 'while ' in line:
s += '{}:'
elif 'printf' in line and '%' in line:
s += '{})'
first_comma = line.index(',')
line = line[:first_comma] + ' % (' + line[first_comma + 2:]
elif 'for' in line:
line = line[3:].replace('(', '').replace(')', '').strip()
line = [l.strip() for l in line.split(';')]
if line[0] and line[1]:
s += '%s\n%swhile %s:{}' % (line[0], s, line[1])
if not line[0] and line[1]:
s += 'while %s:{}' % line[1]
if line[0] and not line[1]:
s += '%s\n%swhile 1:{}' % (line[0], s)
if not line[0] and not line[1]:
s += 'while 1:{}'
inFor = line[2]
line = ''
elif instruct:
s += '"{}": None,'
elif isClarify(line):
s += '# Clarify `{}` is skiped'
else:
s += '{}'
if isPoint(line):
index = -1
for i in range(line.count('*')):
index = line.index('*', index + 1)
if isChar(line[index + 1]):
line = line[:index] + 'p_' + line[index + 1:]
s = s.format(line.strip())
for i, j in toRepleace:
while i in s:
s = s.replace(i, j)
if not s.strip().startswith('#'):
for i in toDelete:
while i in s:
s = s.replace(i, '')
f.write(s + '\n')
f.write("""if __name__ == "__main__":
main()""")
f.close()
def main2(filename, output=None):
with open(filename, 'r') as f:
lines = f.readlines()
if not output:
output = filename + '.py'
f = open(output, 'w')
rst = []
for line in lines:
line = line.lstrip(' ').rstrip(';\n')
if line.startswith('#'):
continue
f.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def isDigit(c):
return c > '0' and c < '9'
def isChar(c):
return c > 'a' and c < 'z' or c > 'A' and c < 'Z'
def isOperator(c):
return c in operators
def isDefun(line):
return '(' in line and ')' in line and sum([(i in line) for i in toDelete])
def isDefStruct(line):
return 'struct ' in line and len(line.split(' ')) == 2
def isUseStruct(line):
return 'struct ' in line and len(line.split(' ')) == 3
def isClarify(line):
return sum([line.startswith(i) for i in types]) and '=' not in line
def isPoint(line):
index = line.index('*') if '*' in line else -1
return index != -1 and len(line) > index + 1 and isChar(line[index + 1]
) and (sum([line.startswith(i) for i in types]) or '=' in line)
def isList(line):
return sum([line.startswith(i) for i in types]
) and '[' in line and ']' in line
def parseInt(s, start=0):
tmp = ''
while start < len(s):
if isDigit(s[start]):
tmp += s[start]
elif len(tmp):
break
start += 1
return int(tmp), start - len(tmp)
def parseVar(s, start=0):
tmp = ''
while start < len(s):
if isChar(s[start]):
tmp += s[start]
elif isDigit(s[start]) and len(tmp):
break
start += 1
return tmp, start - len(tmp)
def parseOperator(s, start=0):
tmp = ''
while start < len(s):
if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':
tmp += s[start]
elif len(tmp) and isOperator(tmp):
return tmp, start - len(tmp)
else:
tmp = ''
start += 1
def main1(filename, output=None):
with open(filename, 'r') as f:
lines = f.readlines()
if not output:
output = filename + '.py'
f = open(output, 'w')
indent = ''
instruct = False
inFor = ''
for line in lines:
line = line.lstrip(' ').rstrip(';\n')
if line.startswith('#'):
continue
if '{' in line:
if instruct:
f.write(indent + '{\n')
indent += ' '
elif '}' in line:
if inFor:
f.write('%s%s\n' % (indent, inFor))
inFor = ''
indent = indent[:-4]
if instruct:
instruct = False
f.write(indent + '}\n')
else:
s = indent
if line.startswith('//'):
s += '{}'
elif isDefun(line):
s += 'def {}:'
elif isUseStruct(line):
l = line.split(' ')[1:]
s += '{} = [{}.copy() for i in range({})]'.format(l[1][:l[1
].index('[')], l[0], parseInt(l[1], l[1].index('['))[0])
s += '{}'
line = ''
elif isDefStruct(line):
s += '{} = \\'
instruct = True
elif 'if' in line or 'while ' in line:
s += '{}:'
elif 'printf' in line and '%' in line:
s += '{})'
first_comma = line.index(',')
line = line[:first_comma] + ' % (' + line[first_comma + 2:]
elif 'for' in line:
line = line[3:].replace('(', '').replace(')', '').strip()
line = [l.strip() for l in line.split(';')]
if line[0] and line[1]:
s += '%s\n%swhile %s:{}' % (line[0], s, line[1])
if not line[0] and line[1]:
s += 'while %s:{}' % line[1]
if line[0] and not line[1]:
s += '%s\n%swhile 1:{}' % (line[0], s)
if not line[0] and not line[1]:
s += 'while 1:{}'
inFor = line[2]
line = ''
elif instruct:
s += '"{}": None,'
elif isClarify(line):
s += '# Clarify `{}` is skiped'
else:
s += '{}'
if isPoint(line):
index = -1
for i in range(line.count('*')):
index = line.index('*', index + 1)
if isChar(line[index + 1]):
line = line[:index] + 'p_' + line[index + 1:]
s = s.format(line.strip())
for i, j in toRepleace:
while i in s:
s = s.replace(i, j)
if not s.strip().startswith('#'):
for i in toDelete:
while i in s:
s = s.replace(i, '')
f.write(s + '\n')
f.write("""if __name__ == "__main__":
main()""")
f.close()
def main2(filename, output=None):
with open(filename, 'r') as f:
lines = f.readlines()
if not output:
output = filename + '.py'
f = open(output, 'w')
rst = []
for line in lines:
line = line.lstrip(' ').rstrip(';\n')
if line.startswith('#'):
continue
f.close()
if __name__ == '__main__':
main1('test.c', output='replace.py')
<|reserved_special_token_1|>
operators = ['-', '~', '++', '--', '*', '!', '/', '*', '%', '+', '-', '>',
'>=', '<', '<=', '==', '!=', '&&', '||', '=']
types = ['int ', 'double ', 'float ', 'char ']
toDelete = types + ['struct ']
toRepleace = [('printf(', 'print('), ('++', ' += 1'), ('--', ' -= 1'), (
'/*', "'''"), ('*/', "'''"), ('//', '#'), ('&&', 'and'), ('||', 'or')]
def isDigit(c):
return c > '0' and c < '9'
def isChar(c):
return c > 'a' and c < 'z' or c > 'A' and c < 'Z'
def isOperator(c):
return c in operators
def isDefun(line):
return '(' in line and ')' in line and sum([(i in line) for i in toDelete])
def isDefStruct(line):
return 'struct ' in line and len(line.split(' ')) == 2
def isUseStruct(line):
return 'struct ' in line and len(line.split(' ')) == 3
def isClarify(line):
return sum([line.startswith(i) for i in types]) and '=' not in line
def isPoint(line):
index = line.index('*') if '*' in line else -1
return index != -1 and len(line) > index + 1 and isChar(line[index + 1]
) and (sum([line.startswith(i) for i in types]) or '=' in line)
def isList(line):
return sum([line.startswith(i) for i in types]
) and '[' in line and ']' in line
def parseInt(s, start=0):
tmp = ''
while start < len(s):
if isDigit(s[start]):
tmp += s[start]
elif len(tmp):
break
start += 1
return int(tmp), start - len(tmp)
def parseVar(s, start=0):
tmp = ''
while start < len(s):
if isChar(s[start]):
tmp += s[start]
elif isDigit(s[start]) and len(tmp):
break
start += 1
return tmp, start - len(tmp)
def parseOperator(s, start=0):
tmp = ''
while start < len(s):
if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':
tmp += s[start]
elif len(tmp) and isOperator(tmp):
return tmp, start - len(tmp)
else:
tmp = ''
start += 1
def main1(filename, output=None):
with open(filename, 'r') as f:
lines = f.readlines()
if not output:
output = filename + '.py'
f = open(output, 'w')
indent = ''
instruct = False
inFor = ''
for line in lines:
line = line.lstrip(' ').rstrip(';\n')
if line.startswith('#'):
continue
if '{' in line:
if instruct:
f.write(indent + '{\n')
indent += ' '
elif '}' in line:
if inFor:
f.write('%s%s\n' % (indent, inFor))
inFor = ''
indent = indent[:-4]
if instruct:
instruct = False
f.write(indent + '}\n')
else:
s = indent
if line.startswith('//'):
s += '{}'
elif isDefun(line):
s += 'def {}:'
elif isUseStruct(line):
l = line.split(' ')[1:]
s += '{} = [{}.copy() for i in range({})]'.format(l[1][:l[1
].index('[')], l[0], parseInt(l[1], l[1].index('['))[0])
s += '{}'
line = ''
elif isDefStruct(line):
s += '{} = \\'
instruct = True
elif 'if' in line or 'while ' in line:
s += '{}:'
elif 'printf' in line and '%' in line:
s += '{})'
first_comma = line.index(',')
line = line[:first_comma] + ' % (' + line[first_comma + 2:]
elif 'for' in line:
line = line[3:].replace('(', '').replace(')', '').strip()
line = [l.strip() for l in line.split(';')]
if line[0] and line[1]:
s += '%s\n%swhile %s:{}' % (line[0], s, line[1])
if not line[0] and line[1]:
s += 'while %s:{}' % line[1]
if line[0] and not line[1]:
s += '%s\n%swhile 1:{}' % (line[0], s)
if not line[0] and not line[1]:
s += 'while 1:{}'
inFor = line[2]
line = ''
elif instruct:
s += '"{}": None,'
elif isClarify(line):
s += '# Clarify `{}` is skiped'
else:
s += '{}'
if isPoint(line):
index = -1
for i in range(line.count('*')):
index = line.index('*', index + 1)
if isChar(line[index + 1]):
line = line[:index] + 'p_' + line[index + 1:]
s = s.format(line.strip())
for i, j in toRepleace:
while i in s:
s = s.replace(i, j)
if not s.strip().startswith('#'):
for i in toDelete:
while i in s:
s = s.replace(i, '')
f.write(s + '\n')
f.write("""if __name__ == "__main__":
main()""")
f.close()
def main2(filename, output=None):
with open(filename, 'r') as f:
lines = f.readlines()
if not output:
output = filename + '.py'
f = open(output, 'w')
rst = []
for line in lines:
line = line.lstrip(' ').rstrip(';\n')
if line.startswith('#'):
continue
f.close()
if __name__ == '__main__':
main1('test.c', output='replace.py')
<|reserved_special_token_1|>
#!/usr/bin/env python
# coding=utf-8
operators = ['-', '~', '++', '--', '*', '!', '/', '*', '%', '+', '-',
'>', '>=', '<', '<=', '==', '!=', '&&', '||', '=']
types = ['int ', 'double ', 'float ', 'char ']
toDelete = types + ['struct ']
toRepleace = [('printf(', 'print('), ('++', ' += 1'), ('--', ' -= 1'),
('/*', "'''"), ('*/', "'''"), ('//','#'),
('&&', 'and'), ('||', 'or')]
def isDigit(c):
return c > '0' and c < '9'
def isChar(c):
return (c > 'a' and c < 'z') or (c > 'A' and c < 'Z')
def isOperator(c):
return c in operators
def isDefun(line):
return '(' in line and ')' in line and sum([i in line for i in toDelete])
def isDefStruct(line):
return 'struct ' in line and len(line.split(' ')) == 2
def isUseStruct(line):
return 'struct ' in line and len(line.split(' ')) == 3
def isClarify(line):
return sum([line.startswith(i) for i in types]) and '=' not in line
def isPoint(line):
index = line.index('*') if '*' in line else -1
return index != -1 and len(line) > (index + 1) and isChar(line[index + 1]) and \
(sum([line.startswith(i) for i in types]) or '=' in line)
def isList(line):
return sum([line.startswith(i) for i in types]) and '[' in line and ']' in line
def parseInt(s, start=0):
tmp = ''
while start < len(s):
if isDigit(s[start]):
tmp += s[start]
elif len(tmp):
break
start += 1
return int(tmp), start - len(tmp)
def parseVar(s, start=0):
tmp = ''
while start < len(s):
if isChar(s[start]):
tmp += s[start]
elif isDigit(s[start]) and len(tmp):
break
start += 1
return tmp, start - len(tmp)
def parseOperator(s, start=0):
tmp = ''
while start < len(s):
if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':
tmp += s[start]
elif len(tmp) and isOperator(tmp):
return tmp, start - len(tmp)
else:
tmp = ''
start += 1
def main1(filename, output=None):
with open(filename, 'r') as f:
lines = f.readlines()
if not output:
output = filename + '.py'
f = open(output, 'w')
indent = ''
instruct = False
inFor = ''
for line in lines:
line = line.lstrip(' ').rstrip(';\n')
if line.startswith('#'):
continue
if '{' in line:
if instruct:
f.write(indent + '{\n')
indent += ' '
elif '}' in line:
if inFor:
f.write('%s%s\n' % (indent, inFor))
inFor = ''
indent = indent[:-4]
if instruct:
instruct = False
f.write(indent + '}\n')
# indent = indent[:-4]
else:
s = indent
if line.startswith('//'):
s += '{}'
elif isDefun(line):
s += 'def {}:'
elif isUseStruct(line):
l = line.split(' ')[1:]
s += ('{} = [{}.copy() for i in range({})]'
'').format(l[1][:l[1].index('[')],
l[0], parseInt(l[1], l[1].index('['))[0])
s += '{}'
line = ''
elif isDefStruct(line):
# indent += ' '
# s += 'class {}:\n' + indent + 'def __init__(self):'
s += '{} = \\'
instruct = True
elif 'if' in line or 'while ' in line:
s += '{}:'
elif 'printf' in line and '%' in line:
s += '{})'
first_comma = line.index(',')
line = line[:first_comma] + ' % (' + line[first_comma + 2:]
elif 'for' in line:
line = line[3:].replace('(', '').replace(')', '').strip()
line = [l.strip() for l in line.split(';')]
if line[0] and line[1]:
s += '%s\n%swhile %s:{}' % (line[0], s, line[1])
if not line[0] and line[1]:
s += 'while %s:{}' % (line[1])
if line[0] and not line[1]:
s += '%s\n%swhile 1:{}' % (line[0], s)
if not line[0] and not line[1]:
s += 'while 1:{}'
inFor = line[2]
line = ''
elif instruct:
# s += 'self.{} = None'
s += '"{}": None,'
elif isClarify(line):
s += '# Clarify `{}` is skiped'
else:
s += '{}'
if isPoint(line):
index = -1
for i in range(line.count('*')):
index = line.index('*', index + 1)
if isChar(line[index + 1]):
line = line[:index] + 'p_' + line[index + 1:]
s = s.format(line.strip())
for i, j in toRepleace:
while i in s:
s = s.replace(i, j)
if not s.strip().startswith('#'):
for i in toDelete:
while i in s:
s = s.replace(i, '')
f.write(s + '\n')
f.write('if __name__ == "__main__":\n main()')
f.close()
def main2(filename, output=None):
with open(filename, 'r') as f:
lines = f.readlines()
if not output:
output = filename + '.py'
f = open(output, 'w')
rst = []
for line in lines:
line = line.lstrip(' ').rstrip(';\n')
if line.startswith('#'):
continue
f.close()
if __name__ == '__main__':
main1('test.c', output='replace.py')
# main2('test.c', output='list.py')
|
flexible
|
{
"blob_id": "082e3350c5827ff2ca909084f2d6a206ae21a7e6",
"index": 3240,
"step-1": "<mask token>\n\n\ndef isChar(c):\n return c > 'a' and c < 'z' or c > 'A' and c < 'Z'\n\n\ndef isOperator(c):\n return c in operators\n\n\ndef isDefun(line):\n return '(' in line and ')' in line and sum([(i in line) for i in toDelete])\n\n\ndef isDefStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 2\n\n\ndef isUseStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 3\n\n\n<mask token>\n\n\ndef isPoint(line):\n index = line.index('*') if '*' in line else -1\n return index != -1 and len(line) > index + 1 and isChar(line[index + 1]\n ) and (sum([line.startswith(i) for i in types]) or '=' in line)\n\n\ndef isList(line):\n return sum([line.startswith(i) for i in types]\n ) and '[' in line and ']' in line\n\n\n<mask token>\n\n\ndef parseVar(s, start=0):\n tmp = ''\n while start < len(s):\n if isChar(s[start]):\n tmp += s[start]\n elif isDigit(s[start]) and len(tmp):\n break\n start += 1\n return tmp, start - len(tmp)\n\n\ndef parseOperator(s, start=0):\n tmp = ''\n while start < len(s):\n if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':\n tmp += s[start]\n elif len(tmp) and isOperator(tmp):\n return tmp, start - len(tmp)\n else:\n tmp = ''\n start += 1\n\n\n<mask token>\n\n\ndef main2(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n rst = []\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n f.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef isChar(c):\n return c > 'a' and c < 'z' or c > 'A' and c < 'Z'\n\n\ndef isOperator(c):\n return c in operators\n\n\ndef isDefun(line):\n return '(' in line and ')' in line and sum([(i in line) for i in toDelete])\n\n\ndef isDefStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 2\n\n\ndef isUseStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 3\n\n\n<mask token>\n\n\ndef isPoint(line):\n index = line.index('*') if '*' in line else -1\n return index != -1 and len(line) > index + 1 and isChar(line[index + 1]\n ) and (sum([line.startswith(i) for i in types]) or '=' in line)\n\n\ndef isList(line):\n return sum([line.startswith(i) for i in types]\n ) and '[' in line and ']' in line\n\n\ndef parseInt(s, start=0):\n tmp = ''\n while start < len(s):\n if isDigit(s[start]):\n tmp += s[start]\n elif len(tmp):\n break\n start += 1\n return int(tmp), start - len(tmp)\n\n\ndef parseVar(s, start=0):\n tmp = ''\n while start < len(s):\n if isChar(s[start]):\n tmp += s[start]\n elif isDigit(s[start]) and len(tmp):\n break\n start += 1\n return tmp, start - len(tmp)\n\n\ndef parseOperator(s, start=0):\n tmp = ''\n while start < len(s):\n if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':\n tmp += s[start]\n elif len(tmp) and isOperator(tmp):\n return tmp, start - len(tmp)\n else:\n tmp = ''\n start += 1\n\n\ndef main1(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n indent = ''\n instruct = False\n inFor = ''\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n if '{' in line:\n if instruct:\n f.write(indent + '{\\n')\n indent += ' '\n elif '}' in line:\n if inFor:\n f.write('%s%s\\n' % (indent, inFor))\n inFor = ''\n indent = indent[:-4]\n if instruct:\n instruct = False\n f.write(indent + '}\\n')\n else:\n s = indent\n if 
line.startswith('//'):\n s += '{}'\n elif isDefun(line):\n s += 'def {}:'\n elif isUseStruct(line):\n l = line.split(' ')[1:]\n s += '{} = [{}.copy() for i in range({})]'.format(l[1][:l[1\n ].index('[')], l[0], parseInt(l[1], l[1].index('['))[0])\n s += '{}'\n line = ''\n elif isDefStruct(line):\n s += '{} = \\\\'\n instruct = True\n elif 'if' in line or 'while ' in line:\n s += '{}:'\n elif 'printf' in line and '%' in line:\n s += '{})'\n first_comma = line.index(',')\n line = line[:first_comma] + ' % (' + line[first_comma + 2:]\n elif 'for' in line:\n line = line[3:].replace('(', '').replace(')', '').strip()\n line = [l.strip() for l in line.split(';')]\n if line[0] and line[1]:\n s += '%s\\n%swhile %s:{}' % (line[0], s, line[1])\n if not line[0] and line[1]:\n s += 'while %s:{}' % line[1]\n if line[0] and not line[1]:\n s += '%s\\n%swhile 1:{}' % (line[0], s)\n if not line[0] and not line[1]:\n s += 'while 1:{}'\n inFor = line[2]\n line = ''\n elif instruct:\n s += '\"{}\": None,'\n elif isClarify(line):\n s += '# Clarify `{}` is skiped'\n else:\n s += '{}'\n if isPoint(line):\n index = -1\n for i in range(line.count('*')):\n index = line.index('*', index + 1)\n if isChar(line[index + 1]):\n line = line[:index] + 'p_' + line[index + 1:]\n s = s.format(line.strip())\n for i, j in toRepleace:\n while i in s:\n s = s.replace(i, j)\n if not s.strip().startswith('#'):\n for i in toDelete:\n while i in s:\n s = s.replace(i, '')\n f.write(s + '\\n')\n f.write(\"\"\"if __name__ == \"__main__\":\n main()\"\"\")\n f.close()\n\n\ndef main2(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n rst = []\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n f.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef isDigit(c):\n return c > '0' and c < '9'\n\n\ndef isChar(c):\n return c > 'a' and c < 'z' or c > 'A' and c < 'Z'\n\n\ndef isOperator(c):\n return c in operators\n\n\ndef isDefun(line):\n return '(' in line and ')' in line and sum([(i in line) for i in toDelete])\n\n\ndef isDefStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 2\n\n\ndef isUseStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 3\n\n\ndef isClarify(line):\n return sum([line.startswith(i) for i in types]) and '=' not in line\n\n\ndef isPoint(line):\n index = line.index('*') if '*' in line else -1\n return index != -1 and len(line) > index + 1 and isChar(line[index + 1]\n ) and (sum([line.startswith(i) for i in types]) or '=' in line)\n\n\ndef isList(line):\n return sum([line.startswith(i) for i in types]\n ) and '[' in line and ']' in line\n\n\ndef parseInt(s, start=0):\n tmp = ''\n while start < len(s):\n if isDigit(s[start]):\n tmp += s[start]\n elif len(tmp):\n break\n start += 1\n return int(tmp), start - len(tmp)\n\n\ndef parseVar(s, start=0):\n tmp = ''\n while start < len(s):\n if isChar(s[start]):\n tmp += s[start]\n elif isDigit(s[start]) and len(tmp):\n break\n start += 1\n return tmp, start - len(tmp)\n\n\ndef parseOperator(s, start=0):\n tmp = ''\n while start < len(s):\n if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':\n tmp += s[start]\n elif len(tmp) and isOperator(tmp):\n return tmp, start - len(tmp)\n else:\n tmp = ''\n start += 1\n\n\ndef main1(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n indent = ''\n instruct = False\n inFor = ''\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n if '{' in line:\n if instruct:\n f.write(indent + '{\\n')\n indent += ' '\n elif '}' in line:\n if inFor:\n f.write('%s%s\\n' % (indent, inFor))\n inFor = ''\n 
indent = indent[:-4]\n if instruct:\n instruct = False\n f.write(indent + '}\\n')\n else:\n s = indent\n if line.startswith('//'):\n s += '{}'\n elif isDefun(line):\n s += 'def {}:'\n elif isUseStruct(line):\n l = line.split(' ')[1:]\n s += '{} = [{}.copy() for i in range({})]'.format(l[1][:l[1\n ].index('[')], l[0], parseInt(l[1], l[1].index('['))[0])\n s += '{}'\n line = ''\n elif isDefStruct(line):\n s += '{} = \\\\'\n instruct = True\n elif 'if' in line or 'while ' in line:\n s += '{}:'\n elif 'printf' in line and '%' in line:\n s += '{})'\n first_comma = line.index(',')\n line = line[:first_comma] + ' % (' + line[first_comma + 2:]\n elif 'for' in line:\n line = line[3:].replace('(', '').replace(')', '').strip()\n line = [l.strip() for l in line.split(';')]\n if line[0] and line[1]:\n s += '%s\\n%swhile %s:{}' % (line[0], s, line[1])\n if not line[0] and line[1]:\n s += 'while %s:{}' % line[1]\n if line[0] and not line[1]:\n s += '%s\\n%swhile 1:{}' % (line[0], s)\n if not line[0] and not line[1]:\n s += 'while 1:{}'\n inFor = line[2]\n line = ''\n elif instruct:\n s += '\"{}\": None,'\n elif isClarify(line):\n s += '# Clarify `{}` is skiped'\n else:\n s += '{}'\n if isPoint(line):\n index = -1\n for i in range(line.count('*')):\n index = line.index('*', index + 1)\n if isChar(line[index + 1]):\n line = line[:index] + 'p_' + line[index + 1:]\n s = s.format(line.strip())\n for i, j in toRepleace:\n while i in s:\n s = s.replace(i, j)\n if not s.strip().startswith('#'):\n for i in toDelete:\n while i in s:\n s = s.replace(i, '')\n f.write(s + '\\n')\n f.write(\"\"\"if __name__ == \"__main__\":\n main()\"\"\")\n f.close()\n\n\ndef main2(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n rst = []\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n f.close()\n\n\nif __name__ == '__main__':\n main1('test.c', 
output='replace.py')\n",
"step-4": "operators = ['-', '~', '++', '--', '*', '!', '/', '*', '%', '+', '-', '>',\n '>=', '<', '<=', '==', '!=', '&&', '||', '=']\ntypes = ['int ', 'double ', 'float ', 'char ']\ntoDelete = types + ['struct ']\ntoRepleace = [('printf(', 'print('), ('++', ' += 1'), ('--', ' -= 1'), (\n '/*', \"'''\"), ('*/', \"'''\"), ('//', '#'), ('&&', 'and'), ('||', 'or')]\n\n\ndef isDigit(c):\n return c > '0' and c < '9'\n\n\ndef isChar(c):\n return c > 'a' and c < 'z' or c > 'A' and c < 'Z'\n\n\ndef isOperator(c):\n return c in operators\n\n\ndef isDefun(line):\n return '(' in line and ')' in line and sum([(i in line) for i in toDelete])\n\n\ndef isDefStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 2\n\n\ndef isUseStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 3\n\n\ndef isClarify(line):\n return sum([line.startswith(i) for i in types]) and '=' not in line\n\n\ndef isPoint(line):\n index = line.index('*') if '*' in line else -1\n return index != -1 and len(line) > index + 1 and isChar(line[index + 1]\n ) and (sum([line.startswith(i) for i in types]) or '=' in line)\n\n\ndef isList(line):\n return sum([line.startswith(i) for i in types]\n ) and '[' in line and ']' in line\n\n\ndef parseInt(s, start=0):\n tmp = ''\n while start < len(s):\n if isDigit(s[start]):\n tmp += s[start]\n elif len(tmp):\n break\n start += 1\n return int(tmp), start - len(tmp)\n\n\ndef parseVar(s, start=0):\n tmp = ''\n while start < len(s):\n if isChar(s[start]):\n tmp += s[start]\n elif isDigit(s[start]) and len(tmp):\n break\n start += 1\n return tmp, start - len(tmp)\n\n\ndef parseOperator(s, start=0):\n tmp = ''\n while start < len(s):\n if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':\n tmp += s[start]\n elif len(tmp) and isOperator(tmp):\n return tmp, start - len(tmp)\n else:\n tmp = ''\n start += 1\n\n\ndef main1(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename 
+ '.py'\n f = open(output, 'w')\n indent = ''\n instruct = False\n inFor = ''\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n if '{' in line:\n if instruct:\n f.write(indent + '{\\n')\n indent += ' '\n elif '}' in line:\n if inFor:\n f.write('%s%s\\n' % (indent, inFor))\n inFor = ''\n indent = indent[:-4]\n if instruct:\n instruct = False\n f.write(indent + '}\\n')\n else:\n s = indent\n if line.startswith('//'):\n s += '{}'\n elif isDefun(line):\n s += 'def {}:'\n elif isUseStruct(line):\n l = line.split(' ')[1:]\n s += '{} = [{}.copy() for i in range({})]'.format(l[1][:l[1\n ].index('[')], l[0], parseInt(l[1], l[1].index('['))[0])\n s += '{}'\n line = ''\n elif isDefStruct(line):\n s += '{} = \\\\'\n instruct = True\n elif 'if' in line or 'while ' in line:\n s += '{}:'\n elif 'printf' in line and '%' in line:\n s += '{})'\n first_comma = line.index(',')\n line = line[:first_comma] + ' % (' + line[first_comma + 2:]\n elif 'for' in line:\n line = line[3:].replace('(', '').replace(')', '').strip()\n line = [l.strip() for l in line.split(';')]\n if line[0] and line[1]:\n s += '%s\\n%swhile %s:{}' % (line[0], s, line[1])\n if not line[0] and line[1]:\n s += 'while %s:{}' % line[1]\n if line[0] and not line[1]:\n s += '%s\\n%swhile 1:{}' % (line[0], s)\n if not line[0] and not line[1]:\n s += 'while 1:{}'\n inFor = line[2]\n line = ''\n elif instruct:\n s += '\"{}\": None,'\n elif isClarify(line):\n s += '# Clarify `{}` is skiped'\n else:\n s += '{}'\n if isPoint(line):\n index = -1\n for i in range(line.count('*')):\n index = line.index('*', index + 1)\n if isChar(line[index + 1]):\n line = line[:index] + 'p_' + line[index + 1:]\n s = s.format(line.strip())\n for i, j in toRepleace:\n while i in s:\n s = s.replace(i, j)\n if not s.strip().startswith('#'):\n for i in toDelete:\n while i in s:\n s = s.replace(i, '')\n f.write(s + '\\n')\n f.write(\"\"\"if __name__ == \"__main__\":\n main()\"\"\")\n f.close()\n\n\ndef 
main2(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n rst = []\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n f.close()\n\n\nif __name__ == '__main__':\n main1('test.c', output='replace.py')\n",
"step-5": "#!/usr/bin/env python\n# coding=utf-8\n\noperators = ['-', '~', '++', '--', '*', '!', '/', '*', '%', '+', '-', \n '>', '>=', '<', '<=', '==', '!=', '&&', '||', '=']\ntypes = ['int ', 'double ', 'float ', 'char ']\ntoDelete = types + ['struct ']\ntoRepleace = [('printf(', 'print('), ('++', ' += 1'), ('--', ' -= 1'),\n ('/*', \"'''\"), ('*/', \"'''\"), ('//','#'),\n ('&&', 'and'), ('||', 'or')]\n\ndef isDigit(c):\n return c > '0' and c < '9'\n\ndef isChar(c):\n return (c > 'a' and c < 'z') or (c > 'A' and c < 'Z')\n\ndef isOperator(c):\n return c in operators\n\ndef isDefun(line):\n return '(' in line and ')' in line and sum([i in line for i in toDelete])\n\ndef isDefStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 2\n\ndef isUseStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 3\n\ndef isClarify(line):\n return sum([line.startswith(i) for i in types]) and '=' not in line\n\ndef isPoint(line):\n index = line.index('*') if '*' in line else -1\n return index != -1 and len(line) > (index + 1) and isChar(line[index + 1]) and \\\n (sum([line.startswith(i) for i in types]) or '=' in line)\n \n\ndef isList(line):\n return sum([line.startswith(i) for i in types]) and '[' in line and ']' in line\n\ndef parseInt(s, start=0):\n tmp = ''\n while start < len(s):\n if isDigit(s[start]):\n tmp += s[start]\n elif len(tmp):\n break\n start += 1\n return int(tmp), start - len(tmp)\n\ndef parseVar(s, start=0):\n tmp = ''\n while start < len(s):\n if isChar(s[start]):\n tmp += s[start]\n elif isDigit(s[start]) and len(tmp):\n break\n start += 1\n return tmp, start - len(tmp)\n\ndef parseOperator(s, start=0):\n tmp = ''\n while start < len(s):\n if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':\n tmp += s[start]\n elif len(tmp) and isOperator(tmp):\n return tmp, start - len(tmp)\n else:\n tmp = ''\n start += 1\n \ndef main1(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not 
output:\n output = filename + '.py'\n f = open(output, 'w')\n indent = ''\n instruct = False\n inFor = ''\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n if '{' in line:\n if instruct:\n f.write(indent + '{\\n')\n indent += ' '\n elif '}' in line:\n if inFor:\n f.write('%s%s\\n' % (indent, inFor))\n inFor = ''\n indent = indent[:-4]\n if instruct:\n instruct = False\n f.write(indent + '}\\n')\n# indent = indent[:-4]\n else:\n s = indent\n if line.startswith('//'):\n s += '{}'\n elif isDefun(line):\n s += 'def {}:'\n elif isUseStruct(line):\n l = line.split(' ')[1:]\n s += ('{} = [{}.copy() for i in range({})]'\n '').format(l[1][:l[1].index('[')],\n l[0], parseInt(l[1], l[1].index('['))[0])\n s += '{}'\n line = ''\n elif isDefStruct(line):\n# indent += ' '\n# s += 'class {}:\\n' + indent + 'def __init__(self):'\n s += '{} = \\\\'\n instruct = True\n elif 'if' in line or 'while ' in line:\n s += '{}:'\n elif 'printf' in line and '%' in line:\n s += '{})'\n first_comma = line.index(',')\n line = line[:first_comma] + ' % (' + line[first_comma + 2:]\n elif 'for' in line:\n line = line[3:].replace('(', '').replace(')', '').strip()\n line = [l.strip() for l in line.split(';')]\n if line[0] and line[1]:\n s += '%s\\n%swhile %s:{}' % (line[0], s, line[1])\n if not line[0] and line[1]:\n s += 'while %s:{}' % (line[1])\n if line[0] and not line[1]:\n s += '%s\\n%swhile 1:{}' % (line[0], s)\n if not line[0] and not line[1]:\n s += 'while 1:{}'\n inFor = line[2]\n line = ''\n elif instruct:\n# s += 'self.{} = None'\n s += '\"{}\": None,'\n elif isClarify(line):\n s += '# Clarify `{}` is skiped'\n else:\n s += '{}'\n if isPoint(line):\n index = -1\n for i in range(line.count('*')):\n index = line.index('*', index + 1)\n if isChar(line[index + 1]):\n line = line[:index] + 'p_' + line[index + 1:]\n s = s.format(line.strip())\n for i, j in toRepleace:\n while i in s:\n s = s.replace(i, j)\n if not s.strip().startswith('#'):\n 
for i in toDelete:\n while i in s:\n s = s.replace(i, '')\n f.write(s + '\\n')\n f.write('if __name__ == \"__main__\":\\n main()')\n f.close()\n \ndef main2(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n rst = []\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n \n \n f.close() \n \nif __name__ == '__main__':\n main1('test.c', output='replace.py')\n# main2('test.c', output='list.py')\n ",
"step-ids": [
10,
12,
15,
16,
17
]
}
|
[
10,
12,
15,
16,
17
] |
# Generated by Django 3.2.3 on 2021-05-23 19:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main_app', '0002_notebook_smathphone'),
]
operations = [
migrations.RenameModel(
old_name='Smathphone',
new_name='Smartphone',
),
]
|
normal
|
{
"blob_id": "7e11a33d82926ed544640a0192e905d373f575da",
"index": 2766,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main_app', '0002_notebook_smathphone')]\n operations = [migrations.RenameModel(old_name='Smathphone', new_name=\n 'Smartphone')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main_app', '0002_notebook_smathphone')]\n operations = [migrations.RenameModel(old_name='Smathphone', new_name=\n 'Smartphone')]\n",
"step-5": "# Generated by Django 3.2.3 on 2021-05-23 19:41\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main_app', '0002_notebook_smathphone'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='Smathphone',\n new_name='Smartphone',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib import admin
from apap.models import *
# Register your models here.
admin.site.register(Doggo)
admin.site.register(Profile)
|
normal
|
{
"blob_id": "22504b466cdeb380b976e23e2708e94131722e11",
"index": 8147,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Doggo)\nadmin.site.register(Profile)\n",
"step-3": "from django.contrib import admin\nfrom apap.models import *\nadmin.site.register(Doggo)\nadmin.site.register(Profile)\n",
"step-4": "from django.contrib import admin\nfrom apap.models import *\n# Register your models here.\n\nadmin.site.register(Doggo)\nadmin.site.register(Profile)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import attr
import click
import guitarpro
import psutil
ALL = object()
@attr.s
class GPTools:
input_file = attr.ib()
output_file = attr.ib()
selected_track_numbers = attr.ib(default=None)
selected_measure_numbers = attr.ib(default=None)
selected_beat_numbers = attr.ib(default=None)
song = None
def parse(self):
if self.input_file is None:
self.input_file = self.find_clipboard()
if self.output_file is None:
self.output_file = self.input_file
self.song = guitarpro.parse(self.input_file)
if self.selected_track_numbers is None:
if self.song.clipboard is not None:
self.selected_track_numbers = list(range(self.song.clipboard.startTrack, self.song.clipboard.stopTrack+1))
else:
self.selected_track_numbers = ALL
if self.selected_measure_numbers is None:
if self.song.clipboard is not None:
self.selected_measure_numbers = list(range(self.song.clipboard.startMeasure, self.song.clipboard.stopMeasure+1))
else:
self.selected_measure_numbers = ALL
if self.selected_beat_numbers is None:
if self.song.clipboard is not None and self.song.clipboard.subBarCopy:
self.selected_beat_numbers = list(range(self.song.clipboard.startBeat, self.song.clipboard.stopBeat+1))
else:
self.selected_beat_numbers = ALL
def find_clipboard(self):
for process in psutil.process_iter():
if process.name().lower() != 'gp5.exe':
continue
break
else:
raise click.ClickException('cannot get Guitar Pro 5 clipboard, is the process running?')
exe_path = process.cmdline()[0]
clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp', 'clipboard.tmp')
return clipboard_path
def write(self):
format = None if self.song.clipboard is None else 'tmp'
guitarpro.write(self.song, self.output_file, format=format)
def selected(self):
for track in self.selected_tracks():
for measure in self.selected_measures(track):
for voice in measure.voices:
for beat in self.selected_beats(voice):
yield track, measure, voice, beat
def selected_tracks(self):
if self.selected_track_numbers is ALL:
yield from self.song.tracks
return
for track in self.song.tracks:
if track.number in self.selected_track_numbers:
yield track
def selected_measures(self, track):
if self.selected_measure_numbers is ALL:
yield from track.measures
return
for measure in track.measures:
if measure.number in self.selected_measure_numbers:
yield measure
def selected_beats(self, voice):
if self.selected_beat_numbers is ALL:
yield from voice.beats
return
for number, beat in enumerate(voice.beats, start=1):
if number in self.selected_beat_numbers:
yield beat
|
normal
|
{
"blob_id": "c6821cb8dd6f8d74ca20c03f87dae321eb869c32",
"index": 2454,
"step-1": "<mask token>\n\n\n@attr.s\nclass GPTools:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self):\n if self.input_file is None:\n self.input_file = self.find_clipboard()\n if self.output_file is None:\n self.output_file = self.input_file\n self.song = guitarpro.parse(self.input_file)\n if self.selected_track_numbers is None:\n if self.song.clipboard is not None:\n self.selected_track_numbers = list(range(self.song.\n clipboard.startTrack, self.song.clipboard.stopTrack + 1))\n else:\n self.selected_track_numbers = ALL\n if self.selected_measure_numbers is None:\n if self.song.clipboard is not None:\n self.selected_measure_numbers = list(range(self.song.\n clipboard.startMeasure, self.song.clipboard.stopMeasure +\n 1))\n else:\n self.selected_measure_numbers = ALL\n if self.selected_beat_numbers is None:\n if (self.song.clipboard is not None and self.song.clipboard.\n subBarCopy):\n self.selected_beat_numbers = list(range(self.song.clipboard\n .startBeat, self.song.clipboard.stopBeat + 1))\n else:\n self.selected_beat_numbers = ALL\n\n def find_clipboard(self):\n for process in psutil.process_iter():\n if process.name().lower() != 'gp5.exe':\n continue\n break\n else:\n raise click.ClickException(\n 'cannot get Guitar Pro 5 clipboard, is the process running?')\n exe_path = process.cmdline()[0]\n clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp',\n 'clipboard.tmp')\n return clipboard_path\n\n def write(self):\n format = None if self.song.clipboard is None else 'tmp'\n guitarpro.write(self.song, self.output_file, format=format)\n\n def selected(self):\n for track in self.selected_tracks():\n for measure in self.selected_measures(track):\n for voice in measure.voices:\n for beat in self.selected_beats(voice):\n yield track, measure, voice, beat\n\n def selected_tracks(self):\n if self.selected_track_numbers is ALL:\n yield from self.song.tracks\n return\n for track in self.song.tracks:\n if 
track.number in self.selected_track_numbers:\n yield track\n\n def selected_measures(self, track):\n if self.selected_measure_numbers is ALL:\n yield from track.measures\n return\n for measure in track.measures:\n if measure.number in self.selected_measure_numbers:\n yield measure\n <mask token>\n",
"step-2": "<mask token>\n\n\n@attr.s\nclass GPTools:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self):\n if self.input_file is None:\n self.input_file = self.find_clipboard()\n if self.output_file is None:\n self.output_file = self.input_file\n self.song = guitarpro.parse(self.input_file)\n if self.selected_track_numbers is None:\n if self.song.clipboard is not None:\n self.selected_track_numbers = list(range(self.song.\n clipboard.startTrack, self.song.clipboard.stopTrack + 1))\n else:\n self.selected_track_numbers = ALL\n if self.selected_measure_numbers is None:\n if self.song.clipboard is not None:\n self.selected_measure_numbers = list(range(self.song.\n clipboard.startMeasure, self.song.clipboard.stopMeasure +\n 1))\n else:\n self.selected_measure_numbers = ALL\n if self.selected_beat_numbers is None:\n if (self.song.clipboard is not None and self.song.clipboard.\n subBarCopy):\n self.selected_beat_numbers = list(range(self.song.clipboard\n .startBeat, self.song.clipboard.stopBeat + 1))\n else:\n self.selected_beat_numbers = ALL\n\n def find_clipboard(self):\n for process in psutil.process_iter():\n if process.name().lower() != 'gp5.exe':\n continue\n break\n else:\n raise click.ClickException(\n 'cannot get Guitar Pro 5 clipboard, is the process running?')\n exe_path = process.cmdline()[0]\n clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp',\n 'clipboard.tmp')\n return clipboard_path\n\n def write(self):\n format = None if self.song.clipboard is None else 'tmp'\n guitarpro.write(self.song, self.output_file, format=format)\n\n def selected(self):\n for track in self.selected_tracks():\n for measure in self.selected_measures(track):\n for voice in measure.voices:\n for beat in self.selected_beats(voice):\n yield track, measure, voice, beat\n\n def selected_tracks(self):\n if self.selected_track_numbers is ALL:\n yield from self.song.tracks\n return\n for track in self.song.tracks:\n if 
track.number in self.selected_track_numbers:\n yield track\n\n def selected_measures(self, track):\n if self.selected_measure_numbers is ALL:\n yield from track.measures\n return\n for measure in track.measures:\n if measure.number in self.selected_measure_numbers:\n yield measure\n\n def selected_beats(self, voice):\n if self.selected_beat_numbers is ALL:\n yield from voice.beats\n return\n for number, beat in enumerate(voice.beats, start=1):\n if number in self.selected_beat_numbers:\n yield beat\n",
"step-3": "<mask token>\n\n\n@attr.s\nclass GPTools:\n input_file = attr.ib()\n output_file = attr.ib()\n selected_track_numbers = attr.ib(default=None)\n selected_measure_numbers = attr.ib(default=None)\n selected_beat_numbers = attr.ib(default=None)\n song = None\n\n def parse(self):\n if self.input_file is None:\n self.input_file = self.find_clipboard()\n if self.output_file is None:\n self.output_file = self.input_file\n self.song = guitarpro.parse(self.input_file)\n if self.selected_track_numbers is None:\n if self.song.clipboard is not None:\n self.selected_track_numbers = list(range(self.song.\n clipboard.startTrack, self.song.clipboard.stopTrack + 1))\n else:\n self.selected_track_numbers = ALL\n if self.selected_measure_numbers is None:\n if self.song.clipboard is not None:\n self.selected_measure_numbers = list(range(self.song.\n clipboard.startMeasure, self.song.clipboard.stopMeasure +\n 1))\n else:\n self.selected_measure_numbers = ALL\n if self.selected_beat_numbers is None:\n if (self.song.clipboard is not None and self.song.clipboard.\n subBarCopy):\n self.selected_beat_numbers = list(range(self.song.clipboard\n .startBeat, self.song.clipboard.stopBeat + 1))\n else:\n self.selected_beat_numbers = ALL\n\n def find_clipboard(self):\n for process in psutil.process_iter():\n if process.name().lower() != 'gp5.exe':\n continue\n break\n else:\n raise click.ClickException(\n 'cannot get Guitar Pro 5 clipboard, is the process running?')\n exe_path = process.cmdline()[0]\n clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp',\n 'clipboard.tmp')\n return clipboard_path\n\n def write(self):\n format = None if self.song.clipboard is None else 'tmp'\n guitarpro.write(self.song, self.output_file, format=format)\n\n def selected(self):\n for track in self.selected_tracks():\n for measure in self.selected_measures(track):\n for voice in measure.voices:\n for beat in self.selected_beats(voice):\n yield track, measure, voice, beat\n\n def 
selected_tracks(self):\n if self.selected_track_numbers is ALL:\n yield from self.song.tracks\n return\n for track in self.song.tracks:\n if track.number in self.selected_track_numbers:\n yield track\n\n def selected_measures(self, track):\n if self.selected_measure_numbers is ALL:\n yield from track.measures\n return\n for measure in track.measures:\n if measure.number in self.selected_measure_numbers:\n yield measure\n\n def selected_beats(self, voice):\n if self.selected_beat_numbers is ALL:\n yield from voice.beats\n return\n for number, beat in enumerate(voice.beats, start=1):\n if number in self.selected_beat_numbers:\n yield beat\n",
"step-4": "import os\nimport attr\nimport click\nimport guitarpro\nimport psutil\nALL = object()\n\n\n@attr.s\nclass GPTools:\n input_file = attr.ib()\n output_file = attr.ib()\n selected_track_numbers = attr.ib(default=None)\n selected_measure_numbers = attr.ib(default=None)\n selected_beat_numbers = attr.ib(default=None)\n song = None\n\n def parse(self):\n if self.input_file is None:\n self.input_file = self.find_clipboard()\n if self.output_file is None:\n self.output_file = self.input_file\n self.song = guitarpro.parse(self.input_file)\n if self.selected_track_numbers is None:\n if self.song.clipboard is not None:\n self.selected_track_numbers = list(range(self.song.\n clipboard.startTrack, self.song.clipboard.stopTrack + 1))\n else:\n self.selected_track_numbers = ALL\n if self.selected_measure_numbers is None:\n if self.song.clipboard is not None:\n self.selected_measure_numbers = list(range(self.song.\n clipboard.startMeasure, self.song.clipboard.stopMeasure +\n 1))\n else:\n self.selected_measure_numbers = ALL\n if self.selected_beat_numbers is None:\n if (self.song.clipboard is not None and self.song.clipboard.\n subBarCopy):\n self.selected_beat_numbers = list(range(self.song.clipboard\n .startBeat, self.song.clipboard.stopBeat + 1))\n else:\n self.selected_beat_numbers = ALL\n\n def find_clipboard(self):\n for process in psutil.process_iter():\n if process.name().lower() != 'gp5.exe':\n continue\n break\n else:\n raise click.ClickException(\n 'cannot get Guitar Pro 5 clipboard, is the process running?')\n exe_path = process.cmdline()[0]\n clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp',\n 'clipboard.tmp')\n return clipboard_path\n\n def write(self):\n format = None if self.song.clipboard is None else 'tmp'\n guitarpro.write(self.song, self.output_file, format=format)\n\n def selected(self):\n for track in self.selected_tracks():\n for measure in self.selected_measures(track):\n for voice in measure.voices:\n for beat in 
self.selected_beats(voice):\n yield track, measure, voice, beat\n\n def selected_tracks(self):\n if self.selected_track_numbers is ALL:\n yield from self.song.tracks\n return\n for track in self.song.tracks:\n if track.number in self.selected_track_numbers:\n yield track\n\n def selected_measures(self, track):\n if self.selected_measure_numbers is ALL:\n yield from track.measures\n return\n for measure in track.measures:\n if measure.number in self.selected_measure_numbers:\n yield measure\n\n def selected_beats(self, voice):\n if self.selected_beat_numbers is ALL:\n yield from voice.beats\n return\n for number, beat in enumerate(voice.beats, start=1):\n if number in self.selected_beat_numbers:\n yield beat\n",
"step-5": "import os\n\nimport attr\nimport click\nimport guitarpro\nimport psutil\n\nALL = object()\n\n\n@attr.s\nclass GPTools:\n input_file = attr.ib()\n output_file = attr.ib()\n selected_track_numbers = attr.ib(default=None)\n selected_measure_numbers = attr.ib(default=None)\n selected_beat_numbers = attr.ib(default=None)\n\n song = None\n\n def parse(self):\n if self.input_file is None:\n self.input_file = self.find_clipboard()\n if self.output_file is None:\n self.output_file = self.input_file\n\n self.song = guitarpro.parse(self.input_file)\n\n if self.selected_track_numbers is None:\n if self.song.clipboard is not None:\n self.selected_track_numbers = list(range(self.song.clipboard.startTrack, self.song.clipboard.stopTrack+1))\n else:\n self.selected_track_numbers = ALL\n if self.selected_measure_numbers is None:\n if self.song.clipboard is not None:\n self.selected_measure_numbers = list(range(self.song.clipboard.startMeasure, self.song.clipboard.stopMeasure+1))\n else:\n self.selected_measure_numbers = ALL\n if self.selected_beat_numbers is None:\n if self.song.clipboard is not None and self.song.clipboard.subBarCopy:\n self.selected_beat_numbers = list(range(self.song.clipboard.startBeat, self.song.clipboard.stopBeat+1))\n else:\n self.selected_beat_numbers = ALL\n\n def find_clipboard(self):\n for process in psutil.process_iter():\n if process.name().lower() != 'gp5.exe':\n continue\n break\n else:\n raise click.ClickException('cannot get Guitar Pro 5 clipboard, is the process running?')\n\n exe_path = process.cmdline()[0]\n clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp', 'clipboard.tmp')\n return clipboard_path\n\n def write(self):\n format = None if self.song.clipboard is None else 'tmp'\n guitarpro.write(self.song, self.output_file, format=format)\n\n def selected(self):\n for track in self.selected_tracks():\n for measure in self.selected_measures(track):\n for voice in measure.voices:\n for beat in self.selected_beats(voice):\n 
yield track, measure, voice, beat\n\n def selected_tracks(self):\n if self.selected_track_numbers is ALL:\n yield from self.song.tracks\n return\n for track in self.song.tracks:\n if track.number in self.selected_track_numbers:\n yield track\n\n def selected_measures(self, track):\n if self.selected_measure_numbers is ALL:\n yield from track.measures\n return\n for measure in track.measures:\n if measure.number in self.selected_measure_numbers:\n yield measure\n\n def selected_beats(self, voice):\n if self.selected_beat_numbers is ALL:\n yield from voice.beats\n return\n for number, beat in enumerate(voice.beats, start=1):\n if number in self.selected_beat_numbers:\n yield beat\n",
"step-ids": [
7,
8,
9,
11,
12
]
}
|
[
7,
8,
9,
11,
12
] |
from layout import UIDump
import Tkinter
from Tkinter import *
from ScriptGenerator import ScriptGen
class Divide_and_Conquer():
def __init__(self, XY):
self.XY = XY
self.user_val = 'None'
self.flag = 'green'
print self.XY
def bounds_Compare(self, bounds, filename):
""" Compares the bounds with Master XY and generates the Script fro given Element. """
# removed "android.widget.Spinner", "android.widget.ExpandableListView" from reqlist, it's interfering with the view.
reqlist = ["android.widget.EditText",
"android.widget.Button", "android.widget.CheckBox", "android.widget.RadioButton", "android.widget.TextView", "android.widget.RelativeLayout",
"android.widget.ImageView", "android.app.Dialogue", "android.view.View"]
ignore_list = [None,'','None']
collection = []
logs = []
count = 0
len_bounds = len(bounds)
for i in bounds:
print '\n ---------------------------------------------- \n'
# print "for every bound block" ----> DEBUG < -----
if int(bounds[count][2]) <= self.XY[1] <= int(bounds[count][3]):
if int(bounds[count][0]) <= self.XY[0] <= int(bounds[count][1]):
# print "current X_Y : ", str(self.XY)
# print "current bounds : ", str(UIDump.bounds[count])
# print "unique id : ", str(UIDump.check_unique_id[count])
# print "resource id : ", str(UIDump.check_resource_id[count])
# print "current text : ", str(UIDump.check_text[count])
# print "in range block" ----> DEBUG < -----
if UIDump.elements[count] in reqlist:
# print "in reqlist block" ----> DEBUG < -----
if UIDump.elements[count] == reqlist[0]:
# print "EditText block" ----> DEBUG < -----
window = Tkinter.Tk()
window.resizable(width=False,height=False);
window.geometry("200x80")
l1=Label(window,width=30,text="Enter Text to Type: ")
l1.pack()
self.entry_id = StringVar()
e1 = Entry(window, width=30,textvariable=self.entry_id)
e1.pack()
def input(args= None):
self.user_val = e1.get()
window.destroy()
if self.resource_id not in ignore_list:
ScriptGen(filename).script("vc.findViewByIdOrRaise('{id}').setText('{text}')\n".format(id=self.resource_id, text=self.user_val))
ScriptGen(filename).log("#LOG({classname}): Cleared and Typed : '{text}' on id : '{id}'\n".format(classname =self.classname,text=self.user_val, id=self.resource_id))
elif self.unique_id not in ignore_list:
ScriptGen(filename).script("vc.findViewByIdOrRaise('{id}').setText('{text}')\n".format(id=self.unique_id, text=self.user_val))
ScriptGen(filename).log("#LOG({classname}): Cleared and Typed : '{text}'\n".format(classname =self.classname,text=self.user_val))
elif UIDump.check_text[count] not in ignore_list:
ScriptGen(filename).script("vc.findViewWithTextOrRaise('{id_text}').setText('{text}')\n".format(id_text=UIDump.check_text[count], text=self.user_val))
ScriptGen(filename).log("#LOG({classname}): Cleared and Typed : '{text}' on Element with text : '{id_text}'\n".format(classname =self.classname,id_text=UIDump.check_text[count], text=self.user_val))
else :
ScriptGen(filename).script("device.touchDip({X},{Y},0)\n".format(X=int(self.XY[0]), Y=int(self.XY[1])))
ScriptGen(filename).log("#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname ="Vulnerable",X=int(self.XY[0]), Y=int(self.XY[1])))
def framedestroy():
window.destroy()
self.unique_id = UIDump.check_unique_id[count]
self.resource_id = UIDump.check_resource_id[count]
self.classname = UIDump.check_className[count]
b1=Button(window,text="Ok",width=10, command = input)
b1.pack(side=LEFT)
b1.place(x=10,y=50)
b2=Button(window, text = "Cancel", width=10, command = framedestroy)
b2.pack(side=RIGHT)
b2.place(x=110,y=50)
window.bind('<Return>', input)
window.mainloop()
self.flag = 'red'
break
elif UIDump.elements[count] in reqlist[1:4]:
# print "Button block" ----> DEBUG < -----
self.unique_id = UIDump.check_unique_id[count]
self.resource_id = UIDump.check_resource_id[count]
self.classname = UIDump.check_className[count]
if UIDump.check_text[count] not in ignore_list:
log_ = "#LOG({classname}): Clicked on element with text : '{id}'\n".format(classname =self.classname,id=UIDump.check_text[count])
line = "vc.findViewWithTextOrRaise('{id}').touch()\n\tvc.sleep(3)\n".format(id=UIDump.check_text[count])
if line not in collection:
collection.append(line)
logs.append(log_)
break
elif self.resource_id not in ignore_list:
log_ = "#LOG({classname}): Clicked on : '{id}'\n".format(classname =self.classname,id=self.resource_id)
line = "vc.findViewByIdOrRaise('{id}').touch()\n\tvc.sleep(3)\n".format(id=self.resource_id)
if line not in collection:
collection.append(line)
logs.append(log_)
break
elif self.unique_id not in ignore_list:
log_ = "#LOG({classname}): Clicked on : '{id}'\n".format(classname =self.classname,id=self.unique_id)
line = "vc.findViewByIdOrRaise('{id_text}').touch()\n\tvc.sleep(3)\n".format(id_text=self.unique_id)
if line not in collection:
collection.append(line)
logs.append(log_)
break
else :
log_ = "#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname =self.classname,X=int(self.XY[0]), Y=int(self.XY[1]))
line = "device.touchDip({X},{Y},0)\n\tvc.sleep(3)\n".format(X=int(self.XY[0]), Y=int(self.XY[1]))
if line not in collection:
collection.append(line)
logs.append(log_)
break
elif UIDump.elements[count] in reqlist[4:]:
# print "remaining views block" ----> DEBUG < -----
self.unique_id = UIDump.check_unique_id[count]
self.resource_id = UIDump.check_resource_id[count]
self.classname = UIDump.check_className[count]
if UIDump.check_text[count] not in ignore_list:
log_ = "#LOG({classname}): Clicked on element with Text : '{id}'\n".format(classname =self.classname,id=UIDump.check_text[count])
line = "vc.findViewWithTextOrRaise('{id}').touch()\n".format(id=UIDump.check_text[count])
if line not in collection:
collection.append(line)
logs.append(log_)
elif self.resource_id not in ignore_list:
log_ = "#LOG({classname}): Clicked on : '{id}'\n".format(classname =self.classname,id=self.resource_id)
line = "vc.findViewByIdOrRaise('{id}').touch()\n".format(id=self.resource_id)
if line not in collection:
collection.append(line)
logs.append(log_)
elif self.unique_id not in ignore_list:
log_ = "#LOG({classname}): Clicked on : '{id}'\n".format(classname =self.classname,id=self.unique_id)
line = "vc.findViewByIdOrRaise('{id_text}').touch()\n".format(id_text=self.unique_id)
if line not in collection:
collection.append(line)
logs.append(log_)
else :
log_ = "#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))
line = "device.touchDip({X},{Y},0)\n\tvc.sleep(3)\n".format(X=int(self.XY[0]), Y=int(self.XY[1]))
if line not in collection:
collection.append(line)
logs.append(log_)
else:
# print "not in imp view block" ----> DEBUG < -----
log_ = "#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))
line = "device.touchDip({X},{Y},0)\n\tvc.sleep(3)\n".format(X=int(self.XY[0]), Y=int(self.XY[1]))
if line not in collection:
collection.append(line)
logs.append(log_)
break
elif UIDump.elements[count] in ["android.widget.FrameLayout"]:
# print "FrameLayout block" ----> DEBUG < -----
log_ = "#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))
line = "device.touchDip({X},{Y},0)\n\tvc.sleep(3)\n".format(X=int(self.XY[0]), Y=int(self.XY[1]))
if line not in collection:
collection.append(line)
logs.append(log_)
count += 1
else :
# print "nothing matches block" ----> DEBUG < -----
log_ = "#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))
line = "device.touchDip({X},{Y},0)\n\tvc.sleep(3)\n".format(X=int(self.XY[0]), Y=int(self.XY[1]))
if line not in collection:
collection.append(line)
logs.append(log_)
print collection
print logs
# ----> DEBUG < -----
if self.flag == 'green':
ScriptGen(filename).script(collection[-1])
ScriptGen(filename).log(logs[-1])
else:
pass
def main():
Divide_and_Conquer().bounds_Compare(bounds)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "7a65a5522db97a7a113a412883b640feede5bcee",
"index": 909,
"step-1": "from layout import UIDump\nimport Tkinter \nfrom Tkinter import *\nfrom ScriptGenerator import ScriptGen\n\nclass Divide_and_Conquer():\n\n\tdef __init__(self, XY):\n\t\tself.XY = XY\n\t\tself.user_val = 'None'\n\t\tself.flag = 'green'\n\n\t\tprint self.XY\n\t\n\tdef bounds_Compare(self, bounds, filename):\n\t\t\"\"\" Compares the bounds with Master XY and generates the Script fro given Element. \"\"\"\n\n\t\t# removed \"android.widget.Spinner\", \"android.widget.ExpandableListView\" from reqlist, it's interfering with the view.\n \t\t\n \t\treqlist = [\"android.widget.EditText\",\n \t\t\"android.widget.Button\", \"android.widget.CheckBox\", \"android.widget.RadioButton\", \"android.widget.TextView\", \"android.widget.RelativeLayout\",\n \t\t\"android.widget.ImageView\", \"android.app.Dialogue\", \"android.view.View\"]\n\n \t\tignore_list = [None,'','None']\n\t\t\n\t\tcollection = []\n\t\tlogs = []\n\n\t\tcount = 0\n\t\tlen_bounds = len(bounds)\n\t\t\n\t\tfor i in bounds:\n\t\t\tprint '\\n ---------------------------------------------- \\n'\n\t\t\t# print \"for every bound block\" ----> DEBUG < -----\n\t\t\tif int(bounds[count][2]) <= self.XY[1] <= int(bounds[count][3]):\n\t\t\t\tif int(bounds[count][0]) <= self.XY[0] <= int(bounds[count][1]):\n\t\t\t\t\t\n\t\t\t\t\t# print \"current X_Y : \", str(self.XY)\n\t\t\t\t\t# print \"current bounds : \", str(UIDump.bounds[count])\n\t\t\t\t\t# print \"unique id : \", str(UIDump.check_unique_id[count])\n\t\t\t\t\t# print \"resource id : \", str(UIDump.check_resource_id[count])\n\t\t\t\t\t# print \"current text : \", str(UIDump.check_text[count])\n\n\t\t\t\t\t# print \"in range block\" ----> DEBUG < -----\n\t\t\t\t\t\t\n\t\t\t\t\tif UIDump.elements[count] in reqlist:\n\t\t\t\t\t\t# print \"in reqlist block\" ----> DEBUG < -----\n\t\t\t\t\t\t\n\t\t\t\t\t\tif UIDump.elements[count] == reqlist[0]:\n\t\t\t\t\t\t\t# print \"EditText block\" ----> DEBUG < -----\n\n\t\t\t\t\t\t\twindow = 
Tkinter.Tk()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\twindow.resizable(width=False,height=False);\n\t\t\t\t\t\t\twindow.geometry(\"200x80\")\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tl1=Label(window,width=30,text=\"Enter Text to Type: \")\n\t\t\t\t\t\t\tl1.pack()\n\n\t\t\t\t\t\t\tself.entry_id = StringVar() \n\t\t\t\t\t\t\te1 = Entry(window, width=30,textvariable=self.entry_id)\n\t\t\t\t\t\t\te1.pack()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tdef input(args= None):\n\t\t\t\t\t\t\t\tself.user_val = e1.get()\n\t\t\t\t\t\t\t\twindow.destroy()\t\t\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif self.resource_id not in ignore_list:\n\t\t\t\t\t\t\t\t\tScriptGen(filename).script(\"vc.findViewByIdOrRaise('{id}').setText('{text}')\\n\".format(id=self.resource_id, text=self.user_val))\n\t\t\t\t\t\t\t\t\tScriptGen(filename).log(\"#LOG({classname}): Cleared and Typed : '{text}' on id : '{id}'\\n\".format(classname =self.classname,text=self.user_val, id=self.resource_id))\n\t\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\t\t\telif self.unique_id not in ignore_list:\n\t\t\t\t\t\t\t\t\tScriptGen(filename).script(\"vc.findViewByIdOrRaise('{id}').setText('{text}')\\n\".format(id=self.unique_id, text=self.user_val))\n\t\t\t\t\t\t\t\t\tScriptGen(filename).log(\"#LOG({classname}): Cleared and Typed : '{text}'\\n\".format(classname =self.classname,text=self.user_val))\n\t\t\t\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\t\t\telif UIDump.check_text[count] not in ignore_list:\n\t\t\t\t\t\t\t\t\tScriptGen(filename).script(\"vc.findViewWithTextOrRaise('{id_text}').setText('{text}')\\n\".format(id_text=UIDump.check_text[count], text=self.user_val))\n\t\t\t\t\t\t\t\t\tScriptGen(filename).log(\"#LOG({classname}): Cleared and Typed : '{text}' on Element with text : '{id_text}'\\n\".format(classname =self.classname,id_text=UIDump.check_text[count], text=self.user_val))\n\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\t\tScriptGen(filename).script(\"device.touchDip({X},{Y},0)\\n\".format(X=int(self.XY[0]), 
Y=int(self.XY[1])))\n\t\t\t\t\t\t\t\t\tScriptGen(filename).log(\"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname =\"Vulnerable\",X=int(self.XY[0]), Y=int(self.XY[1])))\n\n\t\t\t\t\t\t\tdef framedestroy():\n\t\t\t\t\t\t\t\twindow.destroy()\n\n\t\t\t\t\t\t\tself.unique_id = UIDump.check_unique_id[count]\n\t\t\t\t\t\t\tself.resource_id = UIDump.check_resource_id[count]\n\t\t\t\t\t\t\tself.classname = UIDump.check_className[count]\n\n\t\t\t\t\t\t\tb1=Button(window,text=\"Ok\",width=10, command = input)\n\t\t\t\t\t\t\tb1.pack(side=LEFT)\n\t\t\t\t\t\t\tb1.place(x=10,y=50)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tb2=Button(window, text = \"Cancel\", width=10, command = framedestroy)\n\t\t\t\t\t\t\tb2.pack(side=RIGHT)\n\t\t\t\t\t\t\tb2.place(x=110,y=50)\n\n\t\t\t\t\t\t\twindow.bind('<Return>', input)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\twindow.mainloop()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tself.flag = 'red'\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\t\telif UIDump.elements[count] in reqlist[1:4]:\n\t\t\t\t\t\t\t# print \"Button block\" ----> DEBUG < -----\n\n\t\t\t\t\t\t\tself.unique_id = UIDump.check_unique_id[count]\n\t\t\t\t\t\t\tself.resource_id = UIDump.check_resource_id[count]\n\t\t\t\t\t\t\tself.classname = UIDump.check_className[count]\n\n\t\t\t\t\t\t\tif UIDump.check_text[count] not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on element with text : '{id}'\\n\".format(classname =self.classname,id=UIDump.check_text[count])\n\t\t\t\t\t\t\t\tline = \"vc.findViewWithTextOrRaise('{id}').touch()\\n\\tvc.sleep(3)\\n\".format(id=UIDump.check_text[count])\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\telif self.resource_id not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on : '{id}'\\n\".format(classname =self.classname,id=self.resource_id)\n\t\t\t\t\t\t\t\tline = 
\"vc.findViewByIdOrRaise('{id}').touch()\\n\\tvc.sleep(3)\\n\".format(id=self.resource_id)\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\telif self.unique_id not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on : '{id}'\\n\".format(classname =self.classname,id=self.unique_id)\n\t\t\t\t\t\t\t\tline = \"vc.findViewByIdOrRaise('{id_text}').touch()\\n\\tvc.sleep(3)\\n\".format(id_text=self.unique_id)\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname =self.classname,X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\t\tline = \"device.touchDip({X},{Y},0)\\n\\tvc.sleep(3)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\telif UIDump.elements[count] in reqlist[4:]:\n\t\t\t\t\t\t\t# print \"remaining views block\" ----> DEBUG < -----\n\n\t\t\t\t\t\t\tself.unique_id = UIDump.check_unique_id[count]\n\t\t\t\t\t\t\tself.resource_id = UIDump.check_resource_id[count]\n\t\t\t\t\t\t\tself.classname = UIDump.check_className[count]\n\n\t\t\t\t\t\t\tif UIDump.check_text[count] not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on element with Text : '{id}'\\n\".format(classname =self.classname,id=UIDump.check_text[count])\n\t\t\t\t\t\t\t\tline = \"vc.findViewWithTextOrRaise('{id}').touch()\\n\".format(id=UIDump.check_text[count])\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\n\t\t\t\t\t\t\telif self.resource_id not in 
ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on : '{id}'\\n\".format(classname =self.classname,id=self.resource_id)\n\t\t\t\t\t\t\t\tline = \"vc.findViewByIdOrRaise('{id}').touch()\\n\".format(id=self.resource_id)\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\n\t\t\t\t\t\t\telif self.unique_id not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on : '{id}'\\n\".format(classname =self.classname,id=self.unique_id)\n\t\t\t\t\t\t\t\tline = \"vc.findViewByIdOrRaise('{id_text}').touch()\\n\".format(id_text=self.unique_id)\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\t\tline = \"device.touchDip({X},{Y},0)\\n\\tvc.sleep(3)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# print \"not in imp view block\" ----> DEBUG < -----\n\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\tline = \"device.touchDip({X},{Y},0)\\n\\tvc.sleep(3)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\telif UIDump.elements[count] in [\"android.widget.FrameLayout\"]:\n\t\t\t\t\t\t# print \"FrameLayout block\" ----> DEBUG < -----\n\t\t\t\t\t\tlog_ = \"#LOG({classname}): Vulnerable/Unstable field on co-ordinates 
({X},{Y})\\n\".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\tline = \"device.touchDip({X},{Y},0)\\n\\tvc.sleep(3)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\n\t\t\tcount += 1\n\n\t\telse :\n\t\t\t# print \"nothing matches block\" ----> DEBUG < -----\n\t\t\tlog_ = \"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\tline = \"device.touchDip({X},{Y},0)\\n\\tvc.sleep(3)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\tif line not in collection: \n\t\t\t\tcollection.append(line)\n\t\t\t\tlogs.append(log_)\n\n\t\tprint collection\n\t\tprint logs\n\t\t# ----> DEBUG < -----\n\t\t\n\t\tif self.flag == 'green':\n\t\t\tScriptGen(filename).script(collection[-1])\n\t\t\tScriptGen(filename).log(logs[-1])\n\t\telse:\n\t\t\tpass\n\ndef main():\n\tDivide_and_Conquer().bounds_Compare(bounds)\n\nif __name__ == '__main__':\n\tmain()\t",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class SensorDataFrame:
def __init__(self, data):
self.speed, self.steering, self.throttle, self.temp = data
self.timestamp = datetime.now()
def __str__(self):
return SENSOR_DATA_FORMAT.format(self.speed, self.steering, self.
throttle, self.temp)
<|reserved_special_token_0|>
def main():
i2c_test()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SensorDataFrame:
def __init__(self, data):
self.speed, self.steering, self.throttle, self.temp = data
self.timestamp = datetime.now()
def __str__(self):
return SENSOR_DATA_FORMAT.format(self.speed, self.steering, self.
throttle, self.temp)
def i2c_test():
bus = smbus.SMBus(1)
try:
while True:
start = time.time()
try:
data = bus.read_i2c_block_data(102, 0, 4)
print(str(SensorDataFrame(data)))
except (IOError, TimeoutError, OSError):
pass
time.sleep(0.028)
print('')
print('Time: ' + str(time.time() - start))
print('')
except KeyboardInterrupt:
pass
finally:
bus.close()
def main():
i2c_test()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SENSOR_DATA_FORMAT = """Speed: {} km/h
Steering: {}
Throttle: {}
Temperature: {} C"""
class SensorDataFrame:
def __init__(self, data):
self.speed, self.steering, self.throttle, self.temp = data
self.timestamp = datetime.now()
def __str__(self):
return SENSOR_DATA_FORMAT.format(self.speed, self.steering, self.
throttle, self.temp)
def i2c_test():
bus = smbus.SMBus(1)
try:
while True:
start = time.time()
try:
data = bus.read_i2c_block_data(102, 0, 4)
print(str(SensorDataFrame(data)))
except (IOError, TimeoutError, OSError):
pass
time.sleep(0.028)
print('')
print('Time: ' + str(time.time() - start))
print('')
except KeyboardInterrupt:
pass
finally:
bus.close()
def main():
i2c_test()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from datetime import datetime
import time
import smbus
SENSOR_DATA_FORMAT = """Speed: {} km/h
Steering: {}
Throttle: {}
Temperature: {} C"""
class SensorDataFrame:
def __init__(self, data):
self.speed, self.steering, self.throttle, self.temp = data
self.timestamp = datetime.now()
def __str__(self):
return SENSOR_DATA_FORMAT.format(self.speed, self.steering, self.
throttle, self.temp)
def i2c_test():
bus = smbus.SMBus(1)
try:
while True:
start = time.time()
try:
data = bus.read_i2c_block_data(102, 0, 4)
print(str(SensorDataFrame(data)))
except (IOError, TimeoutError, OSError):
pass
time.sleep(0.028)
print('')
print('Time: ' + str(time.time() - start))
print('')
except KeyboardInterrupt:
pass
finally:
bus.close()
def main():
i2c_test()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/python3
from datetime import datetime
import time
import smbus
SENSOR_DATA_FORMAT = "Speed: {} km/h\nSteering: {}\nThrottle: {}\nTemperature: {} C"
class SensorDataFrame:
def __init__(self, data):
self.speed, self.steering, self.throttle, self.temp = data
self.timestamp = datetime.now()
def __str__(self):
return SENSOR_DATA_FORMAT.format(self.speed, self.steering,
self.throttle, self.temp)
def i2c_test():
bus = smbus.SMBus(1)
# bus.write_block_data(0x66, 0, [2, 32, 1, 0, 23])
try:
while True:
start = time.time()
try:
data = bus.read_i2c_block_data(0x66, 0, 4)
print(str(SensorDataFrame(data)))
except (IOError, TimeoutError, OSError):
pass
time.sleep(0.028)
print("")
print("Time: " + str(time.time() - start))
print("")
except KeyboardInterrupt:
pass
finally:
bus.close()
def main():
i2c_test()
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "cf4170760fe6210d8b06f179484258f4ae3f8796",
"index": 7284,
"step-1": "<mask token>\n\n\nclass SensorDataFrame:\n\n def __init__(self, data):\n self.speed, self.steering, self.throttle, self.temp = data\n self.timestamp = datetime.now()\n\n def __str__(self):\n return SENSOR_DATA_FORMAT.format(self.speed, self.steering, self.\n throttle, self.temp)\n\n\n<mask token>\n\n\ndef main():\n i2c_test()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SensorDataFrame:\n\n def __init__(self, data):\n self.speed, self.steering, self.throttle, self.temp = data\n self.timestamp = datetime.now()\n\n def __str__(self):\n return SENSOR_DATA_FORMAT.format(self.speed, self.steering, self.\n throttle, self.temp)\n\n\ndef i2c_test():\n bus = smbus.SMBus(1)\n try:\n while True:\n start = time.time()\n try:\n data = bus.read_i2c_block_data(102, 0, 4)\n print(str(SensorDataFrame(data)))\n except (IOError, TimeoutError, OSError):\n pass\n time.sleep(0.028)\n print('')\n print('Time: ' + str(time.time() - start))\n print('')\n except KeyboardInterrupt:\n pass\n finally:\n bus.close()\n\n\ndef main():\n i2c_test()\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nSENSOR_DATA_FORMAT = \"\"\"Speed: {} km/h\nSteering: {}\nThrottle: {}\nTemperature: {} C\"\"\"\n\n\nclass SensorDataFrame:\n\n def __init__(self, data):\n self.speed, self.steering, self.throttle, self.temp = data\n self.timestamp = datetime.now()\n\n def __str__(self):\n return SENSOR_DATA_FORMAT.format(self.speed, self.steering, self.\n throttle, self.temp)\n\n\ndef i2c_test():\n bus = smbus.SMBus(1)\n try:\n while True:\n start = time.time()\n try:\n data = bus.read_i2c_block_data(102, 0, 4)\n print(str(SensorDataFrame(data)))\n except (IOError, TimeoutError, OSError):\n pass\n time.sleep(0.028)\n print('')\n print('Time: ' + str(time.time() - start))\n print('')\n except KeyboardInterrupt:\n pass\n finally:\n bus.close()\n\n\ndef main():\n i2c_test()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from datetime import datetime\nimport time\nimport smbus\nSENSOR_DATA_FORMAT = \"\"\"Speed: {} km/h\nSteering: {}\nThrottle: {}\nTemperature: {} C\"\"\"\n\n\nclass SensorDataFrame:\n\n def __init__(self, data):\n self.speed, self.steering, self.throttle, self.temp = data\n self.timestamp = datetime.now()\n\n def __str__(self):\n return SENSOR_DATA_FORMAT.format(self.speed, self.steering, self.\n throttle, self.temp)\n\n\ndef i2c_test():\n bus = smbus.SMBus(1)\n try:\n while True:\n start = time.time()\n try:\n data = bus.read_i2c_block_data(102, 0, 4)\n print(str(SensorDataFrame(data)))\n except (IOError, TimeoutError, OSError):\n pass\n time.sleep(0.028)\n print('')\n print('Time: ' + str(time.time() - start))\n print('')\n except KeyboardInterrupt:\n pass\n finally:\n bus.close()\n\n\ndef main():\n i2c_test()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python3\n\nfrom datetime import datetime\nimport time\nimport smbus\n\nSENSOR_DATA_FORMAT = \"Speed: {} km/h\\nSteering: {}\\nThrottle: {}\\nTemperature: {} C\"\n\nclass SensorDataFrame:\n\n def __init__(self, data):\n self.speed, self.steering, self.throttle, self.temp = data\n self.timestamp = datetime.now()\n\n def __str__(self):\n return SENSOR_DATA_FORMAT.format(self.speed, self.steering, \n self.throttle, self.temp)\n\n\ndef i2c_test():\n bus = smbus.SMBus(1)\n # bus.write_block_data(0x66, 0, [2, 32, 1, 0, 23])\n try:\n while True:\n start = time.time()\n try:\n data = bus.read_i2c_block_data(0x66, 0, 4)\n print(str(SensorDataFrame(data)))\n except (IOError, TimeoutError, OSError):\n pass\n time.sleep(0.028)\n print(\"\")\n print(\"Time: \" + str(time.time() - start))\n print(\"\")\n except KeyboardInterrupt:\n pass\n finally:\n bus.close()\n\n\ndef main():\n i2c_test()\n\n\nif __name__ == \"__main__\":\n main()\n\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
#!/usr/bin/python
import time
from daemon import runner
import graphitesend
from pywatts import get_data
class App():
def __init__(self):
self.stdin_path = '/dev/null'
self.stdout_path = '/dev/tty'
self.stderr_path = '/dev/tty'
self.pidfile_path = '/tmp/currentcost_daemon.pid'
self.pidfile_timeout = 5
def run(self):
while True:
graphitesend.init(graphite_server='localhost', system_name='', group='power', prefix='house')
try:
watts, temperature = get_data()
graphitesend.send_dict({'temperature':temperature, 'usage':watts})
time.sleep(5)
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
time.sleep(5)
app = App()
daemon_runner = runner.DaemonRunner(app)
daemon_runner.do_action()
|
normal
|
{
"blob_id": "1aa49bc9a3ea12dffff907d17bd40b4425f28e13",
"index": 9829,
"step-1": "#!/usr/bin/python\nimport time\nfrom daemon import runner\nimport graphitesend\nfrom pywatts import get_data\n\nclass App():\n\tdef __init__(self):\n\t\tself.stdin_path = '/dev/null'\n\t\tself.stdout_path = '/dev/tty'\n\t\tself.stderr_path = '/dev/tty'\n\t\tself.pidfile_path = '/tmp/currentcost_daemon.pid'\n\t\tself.pidfile_timeout = 5\n\n\n def run(self):\n while True:\n graphitesend.init(graphite_server='localhost', system_name='', group='power', prefix='house') \n try:\n watts, temperature = get_data()\n graphitesend.send_dict({'temperature':temperature, 'usage':watts})\n time.sleep(5)\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n pass\n \n time.sleep(5)\n \n \napp = App()\ndaemon_runner = runner.DaemonRunner(app)\ndaemon_runner.do_action()\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
N = int(input())
l = []
for n in range(N):
x = int(input())
l.append(x)
l.sort()
print(*l, sep='\n')
|
normal
|
{
"blob_id": "a699b43c57c315967a6d1881d7012fee4a93607b",
"index": 6347,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor n in range(N):\n x = int(input())\n l.append(x)\nl.sort()\nprint(*l, sep='\\n')\n",
"step-3": "N = int(input())\nl = []\nfor n in range(N):\n x = int(input())\n l.append(x)\nl.sort()\nprint(*l, sep='\\n')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Markdown:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __processSingleLine(self, line):
if self.__isHeading(line):
self.__process('p')
self.__analyzing.append(re.sub('(#{1,6})', '', line).strip())
self.__process('h' + str(len(re.split('\\s', line)[0])))
elif self.__isHeading2(line):
self.__process('h1')
elif self.__isBlankLine(line):
self.__process('p')
else:
self.__analyzing.append(line)
def __isHeading(self, line):
return re.match('^(#{1,6})(\\s)+', line) != None
def __isHeading2(self, line):
if len(self.__analyzing) == 1 and re.match('^[\\=]+$', line) != None:
return True
return False
def __isBlankLine(self, line):
return re.match('^[\n]', line) != None
def __convertAttribute(self, markdown, tag):
lineIndex1 = -1
wordIndex1 = -1
lineIndex2 = -1
wordIndex2 = -1
for lIndex in range(len(self.__analyzing)):
words = re.split('\\s', self.__analyzing[lIndex])
for wIndex in range(len(words)):
if lineIndex1 == -1:
if re.match('^[\\' + markdown + '][\\S]', words[wIndex]):
lineIndex1 = lIndex
wordIndex1 = wIndex
if lineIndex1 >= 0:
if re.match('[\\S]+[\\' + markdown +
'][\\.\\,\\;\\:]*$', words[wIndex]):
lineIndex2 = lIndex
wordIndex2 = wIndex
break
wIndex += 1
if lineIndex2 >= 0:
break
if lineIndex2 >= 0:
newLine1 = re.split('\\s', self.__analyzing[lineIndex1])
newLine1[wordIndex1] = re.sub('^\\' + markdown, '<' + tag + '>',
newLine1[wordIndex1])
self.__analyzing[lineIndex1] = ' '.join(newLine1)
newLine2 = re.split('\\s', self.__analyzing[lineIndex2])
newLine2[wordIndex2] = re.sub('\\' + markdown, '</' + tag + '>',
newLine2[wordIndex2])
self.__analyzing[lineIndex2] = ' '.join(newLine2)
return True
return False
def __convertFormat(self):
while self.__convertAttribute('_', 'em'):
continue
while self.__convertAttribute('*{2,2}', 'strong'):
continue
while self.__convertAttribute('`', 'code'):
continue
def __convertParagraph(self, tag):
if len(self.__analyzing) > 0:
self.__analyzing[0] = '<' + tag + '>' + self.__analyzing[0]
self.__analyzing[-1] = ''.join(self.__analyzing[-1].split('\n')
) + '</' + tag + '>'
<|reserved_special_token_0|>
def toHTML(self, filepath):
f = open(filepath, 'r')
lines = f.readlines()
for line in lines:
self.__processSingleLine(line)
for li in self.__formattedFile:
print(li)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Markdown:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __processSingleLine(self, line):
if self.__isHeading(line):
self.__process('p')
self.__analyzing.append(re.sub('(#{1,6})', '', line).strip())
self.__process('h' + str(len(re.split('\\s', line)[0])))
elif self.__isHeading2(line):
self.__process('h1')
elif self.__isBlankLine(line):
self.__process('p')
else:
self.__analyzing.append(line)
def __isHeading(self, line):
return re.match('^(#{1,6})(\\s)+', line) != None
def __isHeading2(self, line):
if len(self.__analyzing) == 1 and re.match('^[\\=]+$', line) != None:
return True
return False
def __isBlankLine(self, line):
return re.match('^[\n]', line) != None
def __convertAttribute(self, markdown, tag):
lineIndex1 = -1
wordIndex1 = -1
lineIndex2 = -1
wordIndex2 = -1
for lIndex in range(len(self.__analyzing)):
words = re.split('\\s', self.__analyzing[lIndex])
for wIndex in range(len(words)):
if lineIndex1 == -1:
if re.match('^[\\' + markdown + '][\\S]', words[wIndex]):
lineIndex1 = lIndex
wordIndex1 = wIndex
if lineIndex1 >= 0:
if re.match('[\\S]+[\\' + markdown +
'][\\.\\,\\;\\:]*$', words[wIndex]):
lineIndex2 = lIndex
wordIndex2 = wIndex
break
wIndex += 1
if lineIndex2 >= 0:
break
if lineIndex2 >= 0:
newLine1 = re.split('\\s', self.__analyzing[lineIndex1])
newLine1[wordIndex1] = re.sub('^\\' + markdown, '<' + tag + '>',
newLine1[wordIndex1])
self.__analyzing[lineIndex1] = ' '.join(newLine1)
newLine2 = re.split('\\s', self.__analyzing[lineIndex2])
newLine2[wordIndex2] = re.sub('\\' + markdown, '</' + tag + '>',
newLine2[wordIndex2])
self.__analyzing[lineIndex2] = ' '.join(newLine2)
return True
return False
def __convertFormat(self):
while self.__convertAttribute('_', 'em'):
continue
while self.__convertAttribute('*{2,2}', 'strong'):
continue
while self.__convertAttribute('`', 'code'):
continue
def __convertParagraph(self, tag):
if len(self.__analyzing) > 0:
self.__analyzing[0] = '<' + tag + '>' + self.__analyzing[0]
self.__analyzing[-1] = ''.join(self.__analyzing[-1].split('\n')
) + '</' + tag + '>'
def __process(self, tag):
self.__convertFormat()
self.__convertParagraph(tag)
self.__formattedFile.extend(self.__analyzing)
self.__analyzing.clear()
def toHTML(self, filepath):
f = open(filepath, 'r')
lines = f.readlines()
for line in lines:
self.__processSingleLine(line)
for li in self.__formattedFile:
print(li)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Markdown:
__formattedFile = []
__analyzing = []
def __processSingleLine(self, line):
if self.__isHeading(line):
self.__process('p')
self.__analyzing.append(re.sub('(#{1,6})', '', line).strip())
self.__process('h' + str(len(re.split('\\s', line)[0])))
elif self.__isHeading2(line):
self.__process('h1')
elif self.__isBlankLine(line):
self.__process('p')
else:
self.__analyzing.append(line)
def __isHeading(self, line):
return re.match('^(#{1,6})(\\s)+', line) != None
def __isHeading2(self, line):
if len(self.__analyzing) == 1 and re.match('^[\\=]+$', line) != None:
return True
return False
def __isBlankLine(self, line):
return re.match('^[\n]', line) != None
def __convertAttribute(self, markdown, tag):
lineIndex1 = -1
wordIndex1 = -1
lineIndex2 = -1
wordIndex2 = -1
for lIndex in range(len(self.__analyzing)):
words = re.split('\\s', self.__analyzing[lIndex])
for wIndex in range(len(words)):
if lineIndex1 == -1:
if re.match('^[\\' + markdown + '][\\S]', words[wIndex]):
lineIndex1 = lIndex
wordIndex1 = wIndex
if lineIndex1 >= 0:
if re.match('[\\S]+[\\' + markdown +
'][\\.\\,\\;\\:]*$', words[wIndex]):
lineIndex2 = lIndex
wordIndex2 = wIndex
break
wIndex += 1
if lineIndex2 >= 0:
break
if lineIndex2 >= 0:
newLine1 = re.split('\\s', self.__analyzing[lineIndex1])
newLine1[wordIndex1] = re.sub('^\\' + markdown, '<' + tag + '>',
newLine1[wordIndex1])
self.__analyzing[lineIndex1] = ' '.join(newLine1)
newLine2 = re.split('\\s', self.__analyzing[lineIndex2])
newLine2[wordIndex2] = re.sub('\\' + markdown, '</' + tag + '>',
newLine2[wordIndex2])
self.__analyzing[lineIndex2] = ' '.join(newLine2)
return True
return False
def __convertFormat(self):
while self.__convertAttribute('_', 'em'):
continue
while self.__convertAttribute('*{2,2}', 'strong'):
continue
while self.__convertAttribute('`', 'code'):
continue
def __convertParagraph(self, tag):
if len(self.__analyzing) > 0:
self.__analyzing[0] = '<' + tag + '>' + self.__analyzing[0]
self.__analyzing[-1] = ''.join(self.__analyzing[-1].split('\n')
) + '</' + tag + '>'
def __process(self, tag):
self.__convertFormat()
self.__convertParagraph(tag)
self.__formattedFile.extend(self.__analyzing)
self.__analyzing.clear()
def toHTML(self, filepath):
f = open(filepath, 'r')
lines = f.readlines()
for line in lines:
self.__processSingleLine(line)
for li in self.__formattedFile:
print(li)
<|reserved_special_token_1|>
import re
class Markdown:
__formattedFile = []
__analyzing = []
def __processSingleLine(self, line):
if self.__isHeading(line):
self.__process('p')
self.__analyzing.append(re.sub('(#{1,6})', '', line).strip())
self.__process('h' + str(len(re.split('\\s', line)[0])))
elif self.__isHeading2(line):
self.__process('h1')
elif self.__isBlankLine(line):
self.__process('p')
else:
self.__analyzing.append(line)
def __isHeading(self, line):
return re.match('^(#{1,6})(\\s)+', line) != None
def __isHeading2(self, line):
if len(self.__analyzing) == 1 and re.match('^[\\=]+$', line) != None:
return True
return False
def __isBlankLine(self, line):
return re.match('^[\n]', line) != None
def __convertAttribute(self, markdown, tag):
lineIndex1 = -1
wordIndex1 = -1
lineIndex2 = -1
wordIndex2 = -1
for lIndex in range(len(self.__analyzing)):
words = re.split('\\s', self.__analyzing[lIndex])
for wIndex in range(len(words)):
if lineIndex1 == -1:
if re.match('^[\\' + markdown + '][\\S]', words[wIndex]):
lineIndex1 = lIndex
wordIndex1 = wIndex
if lineIndex1 >= 0:
if re.match('[\\S]+[\\' + markdown +
'][\\.\\,\\;\\:]*$', words[wIndex]):
lineIndex2 = lIndex
wordIndex2 = wIndex
break
wIndex += 1
if lineIndex2 >= 0:
break
if lineIndex2 >= 0:
newLine1 = re.split('\\s', self.__analyzing[lineIndex1])
newLine1[wordIndex1] = re.sub('^\\' + markdown, '<' + tag + '>',
newLine1[wordIndex1])
self.__analyzing[lineIndex1] = ' '.join(newLine1)
newLine2 = re.split('\\s', self.__analyzing[lineIndex2])
newLine2[wordIndex2] = re.sub('\\' + markdown, '</' + tag + '>',
newLine2[wordIndex2])
self.__analyzing[lineIndex2] = ' '.join(newLine2)
return True
return False
def __convertFormat(self):
while self.__convertAttribute('_', 'em'):
continue
while self.__convertAttribute('*{2,2}', 'strong'):
continue
while self.__convertAttribute('`', 'code'):
continue
def __convertParagraph(self, tag):
if len(self.__analyzing) > 0:
self.__analyzing[0] = '<' + tag + '>' + self.__analyzing[0]
self.__analyzing[-1] = ''.join(self.__analyzing[-1].split('\n')
) + '</' + tag + '>'
def __process(self, tag):
self.__convertFormat()
self.__convertParagraph(tag)
self.__formattedFile.extend(self.__analyzing)
self.__analyzing.clear()
def toHTML(self, filepath):
f = open(filepath, 'r')
lines = f.readlines()
for line in lines:
self.__processSingleLine(line)
for li in self.__formattedFile:
print(li)
<|reserved_special_token_1|>
import re
class Markdown:
    """Minimal Markdown-to-HTML converter.

    Handles ATX headings (# .. ######), setext-style H1 (a line of '='
    directly under a single buffered line), blank-line-separated
    paragraphs, and the inline spans _em_, **strong** and `code`.
    Converted lines are accumulated and printed by toHTML().
    """

    def __init__(self):
        # Per-instance buffers.  These were previously class-level lists,
        # which made every Markdown instance share (and append to) the
        # same output: a second conversion repeated the first one's HTML.
        self.__formattedFile = []  # finished HTML lines, in order
        self.__analyzing = []      # raw lines of the block currently being built

    def __processSingleLine(self, line):
        """Dispatch one raw input line to the appropriate block handler."""
        if self.__isHeading(line):
            # Flush any paragraph in progress, then emit the heading.
            self.__process("p")
            self.__analyzing.append(re.sub("(#{1,6})", "", line).strip())
            # Heading level = number of leading '#' characters.
            self.__process("h" + str(len(re.split("\\s", line)[0])))
        elif self.__isHeading2(line):
            self.__process("h1")
        elif self.__isBlankLine(line):
            # A blank line terminates the current paragraph.
            self.__process("p")
        else:
            self.__analyzing.append(line)

    def __isHeading(self, line):
        """True for an ATX heading: 1-6 '#' followed by whitespace."""
        return re.match("^(#{1,6})(\\s)+", line) is not None

    def __isHeading2(self, line):
        """True for a setext H1 underline: a '='-only line right after one buffered line."""
        return len(self.__analyzing) == 1 and re.match("^[\\=]+$", line) is not None

    def __isBlankLine(self, line):
        """True when the line begins with a newline (i.e. is empty)."""
        return re.match("^[\n]", line) is not None

    def __convertAttribute(self, markdown, tag):
        """Replace one opening/closing pair of *markdown* markers with <tag>...</tag>.

        Scans the buffered lines word by word for the first word starting
        with the marker and the first subsequent word ending with it
        (optionally followed by punctuation).  Returns True if a pair was
        converted -- callers loop until this returns False.
        """
        lineIndex1 = -1  # line of the opening marker
        wordIndex1 = -1  # word of the opening marker
        lineIndex2 = -1  # line of the closing marker
        wordIndex2 = -1  # word of the closing marker
        for lIndex in range(len(self.__analyzing)):
            words = re.split("\\s", self.__analyzing[lIndex])
            for wIndex in range(len(words)):
                if lineIndex1 == -1:
                    if re.match("^[\\" + markdown + "][\\S]", words[wIndex]):
                        lineIndex1 = lIndex
                        wordIndex1 = wIndex
                if lineIndex1 >= 0:
                    # A single word may both open and close (e.g. "_x_.").
                    if re.match("[\\S]+[\\" + markdown + "][\\.\\,\\;\\:]*$", words[wIndex]):
                        lineIndex2 = lIndex
                        wordIndex2 = wIndex
                        break
            if lineIndex2 >= 0:
                break
        if lineIndex2 >= 0:
            newLine1 = re.split("\\s", self.__analyzing[lineIndex1])
            newLine1[wordIndex1] = re.sub("^\\" + markdown, "<" + tag + ">", newLine1[wordIndex1])
            self.__analyzing[lineIndex1] = " ".join(newLine1)
            newLine2 = re.split("\\s", self.__analyzing[lineIndex2])
            newLine2[wordIndex2] = re.sub("\\" + markdown, "</" + tag + ">", newLine2[wordIndex2])
            self.__analyzing[lineIndex2] = " ".join(newLine2)
            return True
        return False

    def __convertFormat(self):
        """Convert every inline span in the buffer: _em_, **strong**, `code`."""
        while self.__convertAttribute("_", "em"):
            continue
        while self.__convertAttribute("*{2,2}", "strong"):
            continue
        while self.__convertAttribute("`", "code"):
            continue

    def __convertParagraph(self, tag):
        """Wrap the buffered block in <tag>...</tag> (drops the final newline)."""
        if len(self.__analyzing) > 0:
            self.__analyzing[0] = "<" + tag + ">" + self.__analyzing[0]
            self.__analyzing[-1] = "".join(self.__analyzing[-1].split("\n")) + "</" + tag + ">"

    def __process(self, tag):
        """Finalize the buffered block as one *tag* element and clear the buffer."""
        self.__convertFormat()
        self.__convertParagraph(tag)
        self.__formattedFile.extend(self.__analyzing)
        self.__analyzing.clear()

    def toHTML(self, filepath):
        """Convert the Markdown file at *filepath* and print the HTML lines.

        NOTE(review): input that does not end with a blank line leaves its
        final paragraph unflushed (never printed) -- behavior preserved
        from the original; confirm whether a trailing flush is intended.
        """
        with open(filepath, "r") as f:  # close the handle (was leaked before)
            lines = f.readlines()
        for line in lines:
            self.__processSingleLine(line)
        for li in self.__formattedFile:
            print(li)
|
flexible
|
{
"blob_id": "13e3337cf9e573b8906fe914a830a8e895af20ba",
"index": 3983,
"step-1": "<mask token>\n\n\nclass Markdown:\n <mask token>\n <mask token>\n\n def __processSingleLine(self, line):\n if self.__isHeading(line):\n self.__process('p')\n self.__analyzing.append(re.sub('(#{1,6})', '', line).strip())\n self.__process('h' + str(len(re.split('\\\\s', line)[0])))\n elif self.__isHeading2(line):\n self.__process('h1')\n elif self.__isBlankLine(line):\n self.__process('p')\n else:\n self.__analyzing.append(line)\n\n def __isHeading(self, line):\n return re.match('^(#{1,6})(\\\\s)+', line) != None\n\n def __isHeading2(self, line):\n if len(self.__analyzing) == 1 and re.match('^[\\\\=]+$', line) != None:\n return True\n return False\n\n def __isBlankLine(self, line):\n return re.match('^[\\n]', line) != None\n\n def __convertAttribute(self, markdown, tag):\n lineIndex1 = -1\n wordIndex1 = -1\n lineIndex2 = -1\n wordIndex2 = -1\n for lIndex in range(len(self.__analyzing)):\n words = re.split('\\\\s', self.__analyzing[lIndex])\n for wIndex in range(len(words)):\n if lineIndex1 == -1:\n if re.match('^[\\\\' + markdown + '][\\\\S]', words[wIndex]):\n lineIndex1 = lIndex\n wordIndex1 = wIndex\n if lineIndex1 >= 0:\n if re.match('[\\\\S]+[\\\\' + markdown +\n '][\\\\.\\\\,\\\\;\\\\:]*$', words[wIndex]):\n lineIndex2 = lIndex\n wordIndex2 = wIndex\n break\n wIndex += 1\n if lineIndex2 >= 0:\n break\n if lineIndex2 >= 0:\n newLine1 = re.split('\\\\s', self.__analyzing[lineIndex1])\n newLine1[wordIndex1] = re.sub('^\\\\' + markdown, '<' + tag + '>',\n newLine1[wordIndex1])\n self.__analyzing[lineIndex1] = ' '.join(newLine1)\n newLine2 = re.split('\\\\s', self.__analyzing[lineIndex2])\n newLine2[wordIndex2] = re.sub('\\\\' + markdown, '</' + tag + '>',\n newLine2[wordIndex2])\n self.__analyzing[lineIndex2] = ' '.join(newLine2)\n return True\n return False\n\n def __convertFormat(self):\n while self.__convertAttribute('_', 'em'):\n continue\n while self.__convertAttribute('*{2,2}', 'strong'):\n continue\n while self.__convertAttribute('`', 'code'):\n 
continue\n\n def __convertParagraph(self, tag):\n if len(self.__analyzing) > 0:\n self.__analyzing[0] = '<' + tag + '>' + self.__analyzing[0]\n self.__analyzing[-1] = ''.join(self.__analyzing[-1].split('\\n')\n ) + '</' + tag + '>'\n <mask token>\n\n def toHTML(self, filepath):\n f = open(filepath, 'r')\n lines = f.readlines()\n for line in lines:\n self.__processSingleLine(line)\n for li in self.__formattedFile:\n print(li)\n",
"step-2": "<mask token>\n\n\nclass Markdown:\n <mask token>\n <mask token>\n\n def __processSingleLine(self, line):\n if self.__isHeading(line):\n self.__process('p')\n self.__analyzing.append(re.sub('(#{1,6})', '', line).strip())\n self.__process('h' + str(len(re.split('\\\\s', line)[0])))\n elif self.__isHeading2(line):\n self.__process('h1')\n elif self.__isBlankLine(line):\n self.__process('p')\n else:\n self.__analyzing.append(line)\n\n def __isHeading(self, line):\n return re.match('^(#{1,6})(\\\\s)+', line) != None\n\n def __isHeading2(self, line):\n if len(self.__analyzing) == 1 and re.match('^[\\\\=]+$', line) != None:\n return True\n return False\n\n def __isBlankLine(self, line):\n return re.match('^[\\n]', line) != None\n\n def __convertAttribute(self, markdown, tag):\n lineIndex1 = -1\n wordIndex1 = -1\n lineIndex2 = -1\n wordIndex2 = -1\n for lIndex in range(len(self.__analyzing)):\n words = re.split('\\\\s', self.__analyzing[lIndex])\n for wIndex in range(len(words)):\n if lineIndex1 == -1:\n if re.match('^[\\\\' + markdown + '][\\\\S]', words[wIndex]):\n lineIndex1 = lIndex\n wordIndex1 = wIndex\n if lineIndex1 >= 0:\n if re.match('[\\\\S]+[\\\\' + markdown +\n '][\\\\.\\\\,\\\\;\\\\:]*$', words[wIndex]):\n lineIndex2 = lIndex\n wordIndex2 = wIndex\n break\n wIndex += 1\n if lineIndex2 >= 0:\n break\n if lineIndex2 >= 0:\n newLine1 = re.split('\\\\s', self.__analyzing[lineIndex1])\n newLine1[wordIndex1] = re.sub('^\\\\' + markdown, '<' + tag + '>',\n newLine1[wordIndex1])\n self.__analyzing[lineIndex1] = ' '.join(newLine1)\n newLine2 = re.split('\\\\s', self.__analyzing[lineIndex2])\n newLine2[wordIndex2] = re.sub('\\\\' + markdown, '</' + tag + '>',\n newLine2[wordIndex2])\n self.__analyzing[lineIndex2] = ' '.join(newLine2)\n return True\n return False\n\n def __convertFormat(self):\n while self.__convertAttribute('_', 'em'):\n continue\n while self.__convertAttribute('*{2,2}', 'strong'):\n continue\n while self.__convertAttribute('`', 'code'):\n 
continue\n\n def __convertParagraph(self, tag):\n if len(self.__analyzing) > 0:\n self.__analyzing[0] = '<' + tag + '>' + self.__analyzing[0]\n self.__analyzing[-1] = ''.join(self.__analyzing[-1].split('\\n')\n ) + '</' + tag + '>'\n\n def __process(self, tag):\n self.__convertFormat()\n self.__convertParagraph(tag)\n self.__formattedFile.extend(self.__analyzing)\n self.__analyzing.clear()\n\n def toHTML(self, filepath):\n f = open(filepath, 'r')\n lines = f.readlines()\n for line in lines:\n self.__processSingleLine(line)\n for li in self.__formattedFile:\n print(li)\n",
"step-3": "<mask token>\n\n\nclass Markdown:\n __formattedFile = []\n __analyzing = []\n\n def __processSingleLine(self, line):\n if self.__isHeading(line):\n self.__process('p')\n self.__analyzing.append(re.sub('(#{1,6})', '', line).strip())\n self.__process('h' + str(len(re.split('\\\\s', line)[0])))\n elif self.__isHeading2(line):\n self.__process('h1')\n elif self.__isBlankLine(line):\n self.__process('p')\n else:\n self.__analyzing.append(line)\n\n def __isHeading(self, line):\n return re.match('^(#{1,6})(\\\\s)+', line) != None\n\n def __isHeading2(self, line):\n if len(self.__analyzing) == 1 and re.match('^[\\\\=]+$', line) != None:\n return True\n return False\n\n def __isBlankLine(self, line):\n return re.match('^[\\n]', line) != None\n\n def __convertAttribute(self, markdown, tag):\n lineIndex1 = -1\n wordIndex1 = -1\n lineIndex2 = -1\n wordIndex2 = -1\n for lIndex in range(len(self.__analyzing)):\n words = re.split('\\\\s', self.__analyzing[lIndex])\n for wIndex in range(len(words)):\n if lineIndex1 == -1:\n if re.match('^[\\\\' + markdown + '][\\\\S]', words[wIndex]):\n lineIndex1 = lIndex\n wordIndex1 = wIndex\n if lineIndex1 >= 0:\n if re.match('[\\\\S]+[\\\\' + markdown +\n '][\\\\.\\\\,\\\\;\\\\:]*$', words[wIndex]):\n lineIndex2 = lIndex\n wordIndex2 = wIndex\n break\n wIndex += 1\n if lineIndex2 >= 0:\n break\n if lineIndex2 >= 0:\n newLine1 = re.split('\\\\s', self.__analyzing[lineIndex1])\n newLine1[wordIndex1] = re.sub('^\\\\' + markdown, '<' + tag + '>',\n newLine1[wordIndex1])\n self.__analyzing[lineIndex1] = ' '.join(newLine1)\n newLine2 = re.split('\\\\s', self.__analyzing[lineIndex2])\n newLine2[wordIndex2] = re.sub('\\\\' + markdown, '</' + tag + '>',\n newLine2[wordIndex2])\n self.__analyzing[lineIndex2] = ' '.join(newLine2)\n return True\n return False\n\n def __convertFormat(self):\n while self.__convertAttribute('_', 'em'):\n continue\n while self.__convertAttribute('*{2,2}', 'strong'):\n continue\n while self.__convertAttribute('`', 
'code'):\n continue\n\n def __convertParagraph(self, tag):\n if len(self.__analyzing) > 0:\n self.__analyzing[0] = '<' + tag + '>' + self.__analyzing[0]\n self.__analyzing[-1] = ''.join(self.__analyzing[-1].split('\\n')\n ) + '</' + tag + '>'\n\n def __process(self, tag):\n self.__convertFormat()\n self.__convertParagraph(tag)\n self.__formattedFile.extend(self.__analyzing)\n self.__analyzing.clear()\n\n def toHTML(self, filepath):\n f = open(filepath, 'r')\n lines = f.readlines()\n for line in lines:\n self.__processSingleLine(line)\n for li in self.__formattedFile:\n print(li)\n",
"step-4": "import re\n\n\nclass Markdown:\n __formattedFile = []\n __analyzing = []\n\n def __processSingleLine(self, line):\n if self.__isHeading(line):\n self.__process('p')\n self.__analyzing.append(re.sub('(#{1,6})', '', line).strip())\n self.__process('h' + str(len(re.split('\\\\s', line)[0])))\n elif self.__isHeading2(line):\n self.__process('h1')\n elif self.__isBlankLine(line):\n self.__process('p')\n else:\n self.__analyzing.append(line)\n\n def __isHeading(self, line):\n return re.match('^(#{1,6})(\\\\s)+', line) != None\n\n def __isHeading2(self, line):\n if len(self.__analyzing) == 1 and re.match('^[\\\\=]+$', line) != None:\n return True\n return False\n\n def __isBlankLine(self, line):\n return re.match('^[\\n]', line) != None\n\n def __convertAttribute(self, markdown, tag):\n lineIndex1 = -1\n wordIndex1 = -1\n lineIndex2 = -1\n wordIndex2 = -1\n for lIndex in range(len(self.__analyzing)):\n words = re.split('\\\\s', self.__analyzing[lIndex])\n for wIndex in range(len(words)):\n if lineIndex1 == -1:\n if re.match('^[\\\\' + markdown + '][\\\\S]', words[wIndex]):\n lineIndex1 = lIndex\n wordIndex1 = wIndex\n if lineIndex1 >= 0:\n if re.match('[\\\\S]+[\\\\' + markdown +\n '][\\\\.\\\\,\\\\;\\\\:]*$', words[wIndex]):\n lineIndex2 = lIndex\n wordIndex2 = wIndex\n break\n wIndex += 1\n if lineIndex2 >= 0:\n break\n if lineIndex2 >= 0:\n newLine1 = re.split('\\\\s', self.__analyzing[lineIndex1])\n newLine1[wordIndex1] = re.sub('^\\\\' + markdown, '<' + tag + '>',\n newLine1[wordIndex1])\n self.__analyzing[lineIndex1] = ' '.join(newLine1)\n newLine2 = re.split('\\\\s', self.__analyzing[lineIndex2])\n newLine2[wordIndex2] = re.sub('\\\\' + markdown, '</' + tag + '>',\n newLine2[wordIndex2])\n self.__analyzing[lineIndex2] = ' '.join(newLine2)\n return True\n return False\n\n def __convertFormat(self):\n while self.__convertAttribute('_', 'em'):\n continue\n while self.__convertAttribute('*{2,2}', 'strong'):\n continue\n while self.__convertAttribute('`', 
'code'):\n continue\n\n def __convertParagraph(self, tag):\n if len(self.__analyzing) > 0:\n self.__analyzing[0] = '<' + tag + '>' + self.__analyzing[0]\n self.__analyzing[-1] = ''.join(self.__analyzing[-1].split('\\n')\n ) + '</' + tag + '>'\n\n def __process(self, tag):\n self.__convertFormat()\n self.__convertParagraph(tag)\n self.__formattedFile.extend(self.__analyzing)\n self.__analyzing.clear()\n\n def toHTML(self, filepath):\n f = open(filepath, 'r')\n lines = f.readlines()\n for line in lines:\n self.__processSingleLine(line)\n for li in self.__formattedFile:\n print(li)\n",
"step-5": "import re\n\nclass Markdown:\n\n __formattedFile = []\n __analyzing = []\n\n \n def __processSingleLine(self, line):\n if(self.__isHeading(line)):\n self.__process(\"p\")\n self.__analyzing.append(re.sub(\"(#{1,6})\", \"\", line).strip())\n self.__process(\"h\" + str(len(re.split(\"\\s\", line)[0])))\n elif(self.__isHeading2(line)):\n self.__process(\"h1\")\n elif(self.__isBlankLine(line)):\n self.__process(\"p\")\n else:\n self.__analyzing.append(line)\n\n def __isHeading(self, line):\n return re.match(\"^(#{1,6})(\\s)+\", line) != None\n\n def __isHeading2(self, line):\n if(len(self.__analyzing) == 1 and re.match(\"^[\\=]+$\", line) != None):\n return True\n return False\n\n def __isBlankLine(self, line):\n return re.match(\"^[\\n]\", line) != None\n\n def __convertAttribute(self, markdown, tag):\n lineIndex1 = -1\n wordIndex1 = -1\n lineIndex2 = -1\n wordIndex2 = -1\n for lIndex in range(len(self.__analyzing)):\n words = re.split(\"\\s\", self.__analyzing[lIndex])\n for wIndex in range(len(words)):\n if(lineIndex1 == -1):\n if(re.match(\"^[\\\\\" + markdown + \"][\\S]\", words[wIndex])):\n lineIndex1 = lIndex\n wordIndex1 = wIndex\n if(lineIndex1 >= 0):\n if(re.match(\"[\\S]+[\\\\\" + markdown + \"][\\.\\,\\;\\:]*$\", words[wIndex])):\n lineIndex2 = lIndex\n wordIndex2 = wIndex\n break\n wIndex += 1\n if(lineIndex2 >= 0):\n break\n if(lineIndex2 >= 0):\n newLine1 = re.split(\"\\s\", self.__analyzing[lineIndex1])\n newLine1[wordIndex1] = re.sub(\"^\\\\\" + markdown, \"<\" + tag + \">\", newLine1[wordIndex1])\n self.__analyzing[lineIndex1] = \" \".join(newLine1)\n newLine2 = re.split(\"\\s\", self.__analyzing[lineIndex2])\n newLine2[wordIndex2] = re.sub(\"\\\\\" + markdown, \"</\" + tag + \">\", newLine2[wordIndex2])\n self.__analyzing[lineIndex2] = \" \".join(newLine2)\n return True\n return False\n\n def __convertFormat(self):\n while self.__convertAttribute(\"_\", \"em\"): continue\n while self.__convertAttribute(\"*{2,2}\", \"strong\"): continue\n 
while self.__convertAttribute(\"`\", \"code\"): continue\n\n def __convertParagraph(self, tag):\n if(len(self.__analyzing) > 0):\n self.__analyzing[0] = \"<\" + tag + \">\" + self.__analyzing[0]\n self.__analyzing[-1] = \"\".join(self.__analyzing[-1].split(\"\\n\")) + \"</\" + tag + \">\"\n\n def __process(self, tag):\n self.__convertFormat()\n self.__convertParagraph(tag)\n self.__formattedFile.extend(self.__analyzing)\n self.__analyzing.clear()\n\n def toHTML(self, filepath):\n f = open(filepath, \"r\")\n lines = f.readlines()\n for line in lines:\n self.__processSingleLine(line)\n for li in self.__formattedFile:\n print(li)",
"step-ids": [
9,
10,
11,
12,
13
]
}
|
[
9,
10,
11,
12,
13
] |
# -*- coding: utf-8 -*-
# @Author: huerke
# @Date: 2016-09-03 10:55:54
# @Last Modified by: huerke
# @Last Modified time: 2016-09-03 15:54:50
from flask import render_template
from . import main
@main.app_errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page for any URL that has no route."""
    body = render_template('404.html')
    return body, 404
@main.app_errorhandler(500)
def internal_server_error(e):
    """Render the custom 500 page when an unhandled exception escapes a view."""
    body = render_template('500.html')
    return body, 500
|
normal
|
{
"blob_id": "021cbd1bd22f9ec48db2e52b2a98be169bbfdbbd",
"index": 5979,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@main.app_errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@main.app_errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\n@main.app_errorhandler(500)\ndef internal_server_error(e):\n return render_template('500.html'), 500\n",
"step-4": "from flask import render_template\nfrom . import main\n\n\n@main.app_errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\n@main.app_errorhandler(500)\ndef internal_server_error(e):\n return render_template('500.html'), 500\n",
"step-5": "# -*- coding: utf-8 -*-\n# @Author: huerke\n# @Date: 2016-09-03 10:55:54\n# @Last Modified by: huerke\n# @Last Modified time: 2016-09-03 15:54:50\nfrom flask import render_template\nfrom . import main\n\n\n@main.app_errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\n@main.app_errorhandler(500)\ndef internal_server_error(e):\n return render_template('500.html'), 500\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*-coding:utf-8-*-
# Author: Scott Larter
import pygame
import pygame.draw
import numpy as np
from agent import *
from tools import *
# --- simulation parameters ---
SCREENSIZE = [1200, 400] # window size in pixels (layout for walls.csv)
#SCREENSIZE = [1200, 650] # walls2.csv
RESOLUTION = 180  # world/screen resolution; only referenced in commented-out conversion calls below
AGENTSNUM = 12  # total number of agents
GROUPSNUM = 2  # nominal group count; not referenced in this script (groups follow from AGENTSNUM/MAXGROUPSIZE)
MAXGROUPSIZE = 6  # agents per group
MAXSUBGROUPSIZE = 3  # agents per subgroup within a group
BACKGROUNDCOLOR = [255, 255, 255]  # RGB white
LINECOLOR = [255,0,0]  # RGB red used to draw walls
AGENTSIZE = 9  # agent circle radius in pixels
AGENTTHICKNESS = 3  # agent circle outline width in pixels
WALLSFILE = "walls.csv"  # CSV of wall segments, one "x1,y1,x2,y2" line per wall
# --- pygame window setup ---
pygame.init()
screen = pygame.display.set_mode(SCREENSIZE)
pygame.display.set_caption('Social Force Model - Crosswalk')
clock = pygame.time.Clock()  # caps the frame rate in the main loop
# initialize walls: each wall is [x1, y1, x2, y2] in world coordinates,
# one comma-separated line per wall in WALLSFILE
walls = []
# use a context manager so the file handle is closed deterministically
# (the original iterated over a bare open() and leaked the handle)
with open(WALLSFILE, newline='', encoding="utf-8-sig") as wall_file:
    for line in wall_file:
        coords = line.split(",")
        # exactly the first four fields are used; extra columns are ignored
        walls.append([float(coords[0]), float(coords[1]),
                      float(coords[2]), float(coords[3])])
# initialize agents: agents[group_id][subgroup_id] -> list of Agent objects,
# filled sequentially so every group holds MAXGROUPSIZE agents split into
# subgroups of MAXSUBGROUPSIZE
agents = []
for agent_id in range(AGENTSNUM):
    group_id = agent_id // MAXGROUPSIZE
    subgroup_id = (agent_id % MAXGROUPSIZE) // MAXSUBGROUPSIZE
    # start a fresh group / subgroup bucket at each boundary
    if agent_id % MAXGROUPSIZE == 0:
        agents.append([])
    if agent_id % MAXSUBGROUPSIZE == 0:
        agents[group_id].append([])
    agents[group_id][subgroup_id].append(Agent(agent_id, group_id, subgroup_id))
# Main simulation loop.  Each iteration advances the social force model by
# one time step (dt = 0.5, see the integration step below) and redraws the
# scene, capped at 20 frames per second.
running = True
while running:
    # handle window events; a mouse click only samples the cursor position
    # (mouseX/mouseY are not used elsewhere in this script)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.MOUSEBUTTONDOWN:
            (mouseX, mouseY) = pygame.mouse.get_pos()
    screen.fill(BACKGROUNDCOLOR)
    # draw walls (world coordinates scaled x10 to screen pixels)
    for wall in walls:
        startPos = np.array([wall[0],wall[1]])
        endPos = np.array([wall[2],wall[3]])
        startPx = startPos*10 #worldCoord2ScreenCoord(startPos,SCREENSIZE,RESOLUTION)
        endPx = endPos*10 #worldCoord2ScreenCoord(endPos,SCREENSIZE,RESOLUTION)
        pygame.draw.line(screen, LINECOLOR, startPx.astype(int), endPx.astype(int))
    # physics pass: accumulate forces on every agent, then integrate
    for group in agents:
        for subgroup in group:
            for agent in subgroup:
                # desired velocity points from current position to the goal
                agent.direction = normalize(agent.dest - agent.pos)
                agent.desiredV = agent.desiredSpeed * agent.direction
                adapt = agent.adaptVel()
                # initial forces values
                peopleInter = 0.0
                wallInter = 0.0
                groupVis = 0.0
                groupAtt = 0.0
                ownGroupRep = 0.0
                otherGroupRep = 0.0
                # wall interaction
                for wall in walls:
                    wallInter += agent.wallInteraction(wall)
                # people interaction: every other agent, regardless of group
                for groupj in agents:
                    for subgroupj in groupj:
                        for agentj in subgroupj:
                            if agent.agentId != agentj.agentId:
                                peopleInter += agent.peopleInteraction(agentj)
                # list of group members excluding current ped
                agentGroup = []
                for sub in group:
                    for mem in sub:
                        if mem.agentId != agent.agentId:
                            agentGroup.append(mem)
                # group visual and attraction forces
                if len(agentGroup) > 0:
                    groupVis = agent.groupVisual(agentGroup)
                    groupAtt = agent.groupAttraction(agentGroup + [agent])
                # same group repulsion
                for agentj in agentGroup:
                    ownGroupRep += agent.ownGroupRepulsion(agentj)
                groupInter = groupVis + groupAtt + ownGroupRep
                # other groups repulsion
                for gid,g in enumerate(agents):
                    if gid != agent.groupId:
                        # create list of 'other group' members
                        otherGroup = []
                        for sub in g:
                            otherGroup += sub
                        otherGroupRep += agent.otherGroupRepulsion(otherGroup)
                #print(otherGroupRep)
                # subgroup forces
                subgroupForce = agent.subgroupForces(group)
                # NOTE(review): otherGroupRep and subgroupForce are computed
                # above but excluded from the force sum (commented out below)
                # -- confirm whether they should be enabled
                sumForce = adapt + wallInter + peopleInter + groupInter# + otherGroupRep + subgroupForce
                accl = sumForce / agent.mass
                # explicit Euler integration with dt = 0.5
                agent.actualV = agent.actualV + accl*0.5 # consider dt = 0.5
                agent.pos = agent.pos + agent.actualV*0.5
                # goal check: within 2 world units of the destination;
                # NOTE(review): bitwise '&' instead of 'and' -- works here
                # because both operands are boolean-valued, but fragile
                if (np.linalg.norm(agent.pos - agent.dest) < 2) & (agent.Goal == 0):
                    agent.Goal = 1
                    agent.timeOut = pygame.time.get_ticks()
                    #agent.timeOut = clock.get_time()/1000.0
                    print('Agent ', agent.agentId, 'reached goal at ', agent.timeOut)
    # render pass: agent body (group color), subgroup marker, and velocity
    # vectors (actual velocity in group color, desired velocity in orange)
    for group in agents:
        for subgroup in group:
            for agent in subgroup:
                scPos = (agent.pos*10).astype(int) #worldCoord2ScreenCoord(agent.pos, SCREENSIZE, RESOLUTION)
                endPos = ((agent.pos + agent.actualV) * 10).astype(int)
                endPosDV = ((agent.pos + agent.desiredV) * 10).astype(int)
                pygame.draw.circle(screen, agent.color, scPos, AGENTSIZE, AGENTTHICKNESS)
                pygame.draw.circle(screen, agent.subgroupColor, scPos, 5, 3)
                pygame.draw.line(screen, agent.color, scPos, endPos, 2)
                pygame.draw.line(screen, [255,60,0], scPos, endPosDV, 2)
    pygame.display.flip()
    clock.tick(20)
    #clock.get_time
|
normal
|
{
"blob_id": "00051a4087bfcf2e6826e9afa898830dc59aa5ab",
"index": 5451,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npygame.init()\n<mask token>\npygame.display.set_caption('Social Force Model - Crosswalk')\n<mask token>\nfor line in open(WALLSFILE, newline='', encoding='utf-8-sig'):\n coords = line.split(',')\n wall = []\n wall.append(float(coords[0]))\n wall.append(float(coords[1]))\n wall.append(float(coords[2]))\n wall.append(float(coords[3]))\n walls.append(wall)\n<mask token>\nfor n in range(AGENTSNUM):\n group_id = int(n / MAXGROUPSIZE)\n subgroup_id = int(n % MAXGROUPSIZE / MAXSUBGROUPSIZE)\n if n % MAXGROUPSIZE == 0:\n agents.append([])\n if n % MAXSUBGROUPSIZE == 0:\n agents[group_id].append([])\n agent = Agent(n, group_id, subgroup_id)\n agents[group_id][subgroup_id].append(agent)\n<mask token>\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouseX, mouseY = pygame.mouse.get_pos()\n screen.fill(BACKGROUNDCOLOR)\n for wall in walls:\n startPos = np.array([wall[0], wall[1]])\n endPos = np.array([wall[2], wall[3]])\n startPx = startPos * 10\n endPx = endPos * 10\n pygame.draw.line(screen, LINECOLOR, startPx.astype(int), endPx.\n astype(int))\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n agent.direction = normalize(agent.dest - agent.pos)\n agent.desiredV = agent.desiredSpeed * agent.direction\n adapt = agent.adaptVel()\n peopleInter = 0.0\n wallInter = 0.0\n groupVis = 0.0\n groupAtt = 0.0\n ownGroupRep = 0.0\n otherGroupRep = 0.0\n for wall in walls:\n wallInter += agent.wallInteraction(wall)\n for groupj in agents:\n for subgroupj in groupj:\n for agentj in subgroupj:\n if agent.agentId != agentj.agentId:\n peopleInter += agent.peopleInteraction(agentj)\n agentGroup = []\n for sub in group:\n for mem in sub:\n if mem.agentId != agent.agentId:\n agentGroup.append(mem)\n if len(agentGroup) > 0:\n groupVis = agent.groupVisual(agentGroup)\n groupAtt = agent.groupAttraction(agentGroup + [agent])\n for agentj in 
agentGroup:\n ownGroupRep += agent.ownGroupRepulsion(agentj)\n groupInter = groupVis + groupAtt + ownGroupRep\n for gid, g in enumerate(agents):\n if gid != agent.groupId:\n otherGroup = []\n for sub in g:\n otherGroup += sub\n otherGroupRep += agent.otherGroupRepulsion(otherGroup)\n subgroupForce = agent.subgroupForces(group)\n sumForce = adapt + wallInter + peopleInter + groupInter\n accl = sumForce / agent.mass\n agent.actualV = agent.actualV + accl * 0.5\n agent.pos = agent.pos + agent.actualV * 0.5\n if (np.linalg.norm(agent.pos - agent.dest) < 2) & (agent.\n Goal == 0):\n agent.Goal = 1\n agent.timeOut = pygame.time.get_ticks()\n print('Agent ', agent.agentId, 'reached goal at ',\n agent.timeOut)\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n scPos = (agent.pos * 10).astype(int)\n endPos = ((agent.pos + agent.actualV) * 10).astype(int)\n endPosDV = ((agent.pos + agent.desiredV) * 10).astype(int)\n pygame.draw.circle(screen, agent.color, scPos, AGENTSIZE,\n AGENTTHICKNESS)\n pygame.draw.circle(screen, agent.subgroupColor, scPos, 5, 3)\n pygame.draw.line(screen, agent.color, scPos, endPos, 2)\n pygame.draw.line(screen, [255, 60, 0], scPos, endPosDV, 2)\n pygame.display.flip()\n clock.tick(20)\n",
"step-3": "<mask token>\nSCREENSIZE = [1200, 400]\nRESOLUTION = 180\nAGENTSNUM = 12\nGROUPSNUM = 2\nMAXGROUPSIZE = 6\nMAXSUBGROUPSIZE = 3\nBACKGROUNDCOLOR = [255, 255, 255]\nLINECOLOR = [255, 0, 0]\nAGENTSIZE = 9\nAGENTTHICKNESS = 3\nWALLSFILE = 'walls.csv'\npygame.init()\nscreen = pygame.display.set_mode(SCREENSIZE)\npygame.display.set_caption('Social Force Model - Crosswalk')\nclock = pygame.time.Clock()\nwalls = []\nfor line in open(WALLSFILE, newline='', encoding='utf-8-sig'):\n coords = line.split(',')\n wall = []\n wall.append(float(coords[0]))\n wall.append(float(coords[1]))\n wall.append(float(coords[2]))\n wall.append(float(coords[3]))\n walls.append(wall)\nagents = []\nfor n in range(AGENTSNUM):\n group_id = int(n / MAXGROUPSIZE)\n subgroup_id = int(n % MAXGROUPSIZE / MAXSUBGROUPSIZE)\n if n % MAXGROUPSIZE == 0:\n agents.append([])\n if n % MAXSUBGROUPSIZE == 0:\n agents[group_id].append([])\n agent = Agent(n, group_id, subgroup_id)\n agents[group_id][subgroup_id].append(agent)\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouseX, mouseY = pygame.mouse.get_pos()\n screen.fill(BACKGROUNDCOLOR)\n for wall in walls:\n startPos = np.array([wall[0], wall[1]])\n endPos = np.array([wall[2], wall[3]])\n startPx = startPos * 10\n endPx = endPos * 10\n pygame.draw.line(screen, LINECOLOR, startPx.astype(int), endPx.\n astype(int))\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n agent.direction = normalize(agent.dest - agent.pos)\n agent.desiredV = agent.desiredSpeed * agent.direction\n adapt = agent.adaptVel()\n peopleInter = 0.0\n wallInter = 0.0\n groupVis = 0.0\n groupAtt = 0.0\n ownGroupRep = 0.0\n otherGroupRep = 0.0\n for wall in walls:\n wallInter += agent.wallInteraction(wall)\n for groupj in agents:\n for subgroupj in groupj:\n for agentj in subgroupj:\n if agent.agentId != agentj.agentId:\n peopleInter += 
agent.peopleInteraction(agentj)\n agentGroup = []\n for sub in group:\n for mem in sub:\n if mem.agentId != agent.agentId:\n agentGroup.append(mem)\n if len(agentGroup) > 0:\n groupVis = agent.groupVisual(agentGroup)\n groupAtt = agent.groupAttraction(agentGroup + [agent])\n for agentj in agentGroup:\n ownGroupRep += agent.ownGroupRepulsion(agentj)\n groupInter = groupVis + groupAtt + ownGroupRep\n for gid, g in enumerate(agents):\n if gid != agent.groupId:\n otherGroup = []\n for sub in g:\n otherGroup += sub\n otherGroupRep += agent.otherGroupRepulsion(otherGroup)\n subgroupForce = agent.subgroupForces(group)\n sumForce = adapt + wallInter + peopleInter + groupInter\n accl = sumForce / agent.mass\n agent.actualV = agent.actualV + accl * 0.5\n agent.pos = agent.pos + agent.actualV * 0.5\n if (np.linalg.norm(agent.pos - agent.dest) < 2) & (agent.\n Goal == 0):\n agent.Goal = 1\n agent.timeOut = pygame.time.get_ticks()\n print('Agent ', agent.agentId, 'reached goal at ',\n agent.timeOut)\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n scPos = (agent.pos * 10).astype(int)\n endPos = ((agent.pos + agent.actualV) * 10).astype(int)\n endPosDV = ((agent.pos + agent.desiredV) * 10).astype(int)\n pygame.draw.circle(screen, agent.color, scPos, AGENTSIZE,\n AGENTTHICKNESS)\n pygame.draw.circle(screen, agent.subgroupColor, scPos, 5, 3)\n pygame.draw.line(screen, agent.color, scPos, endPos, 2)\n pygame.draw.line(screen, [255, 60, 0], scPos, endPosDV, 2)\n pygame.display.flip()\n clock.tick(20)\n",
"step-4": "import pygame\nimport pygame.draw\nimport numpy as np\nfrom agent import *\nfrom tools import *\nSCREENSIZE = [1200, 400]\nRESOLUTION = 180\nAGENTSNUM = 12\nGROUPSNUM = 2\nMAXGROUPSIZE = 6\nMAXSUBGROUPSIZE = 3\nBACKGROUNDCOLOR = [255, 255, 255]\nLINECOLOR = [255, 0, 0]\nAGENTSIZE = 9\nAGENTTHICKNESS = 3\nWALLSFILE = 'walls.csv'\npygame.init()\nscreen = pygame.display.set_mode(SCREENSIZE)\npygame.display.set_caption('Social Force Model - Crosswalk')\nclock = pygame.time.Clock()\nwalls = []\nfor line in open(WALLSFILE, newline='', encoding='utf-8-sig'):\n coords = line.split(',')\n wall = []\n wall.append(float(coords[0]))\n wall.append(float(coords[1]))\n wall.append(float(coords[2]))\n wall.append(float(coords[3]))\n walls.append(wall)\nagents = []\nfor n in range(AGENTSNUM):\n group_id = int(n / MAXGROUPSIZE)\n subgroup_id = int(n % MAXGROUPSIZE / MAXSUBGROUPSIZE)\n if n % MAXGROUPSIZE == 0:\n agents.append([])\n if n % MAXSUBGROUPSIZE == 0:\n agents[group_id].append([])\n agent = Agent(n, group_id, subgroup_id)\n agents[group_id][subgroup_id].append(agent)\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouseX, mouseY = pygame.mouse.get_pos()\n screen.fill(BACKGROUNDCOLOR)\n for wall in walls:\n startPos = np.array([wall[0], wall[1]])\n endPos = np.array([wall[2], wall[3]])\n startPx = startPos * 10\n endPx = endPos * 10\n pygame.draw.line(screen, LINECOLOR, startPx.astype(int), endPx.\n astype(int))\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n agent.direction = normalize(agent.dest - agent.pos)\n agent.desiredV = agent.desiredSpeed * agent.direction\n adapt = agent.adaptVel()\n peopleInter = 0.0\n wallInter = 0.0\n groupVis = 0.0\n groupAtt = 0.0\n ownGroupRep = 0.0\n otherGroupRep = 0.0\n for wall in walls:\n wallInter += agent.wallInteraction(wall)\n for groupj in agents:\n for subgroupj in groupj:\n for 
agentj in subgroupj:\n if agent.agentId != agentj.agentId:\n peopleInter += agent.peopleInteraction(agentj)\n agentGroup = []\n for sub in group:\n for mem in sub:\n if mem.agentId != agent.agentId:\n agentGroup.append(mem)\n if len(agentGroup) > 0:\n groupVis = agent.groupVisual(agentGroup)\n groupAtt = agent.groupAttraction(agentGroup + [agent])\n for agentj in agentGroup:\n ownGroupRep += agent.ownGroupRepulsion(agentj)\n groupInter = groupVis + groupAtt + ownGroupRep\n for gid, g in enumerate(agents):\n if gid != agent.groupId:\n otherGroup = []\n for sub in g:\n otherGroup += sub\n otherGroupRep += agent.otherGroupRepulsion(otherGroup)\n subgroupForce = agent.subgroupForces(group)\n sumForce = adapt + wallInter + peopleInter + groupInter\n accl = sumForce / agent.mass\n agent.actualV = agent.actualV + accl * 0.5\n agent.pos = agent.pos + agent.actualV * 0.5\n if (np.linalg.norm(agent.pos - agent.dest) < 2) & (agent.\n Goal == 0):\n agent.Goal = 1\n agent.timeOut = pygame.time.get_ticks()\n print('Agent ', agent.agentId, 'reached goal at ',\n agent.timeOut)\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n scPos = (agent.pos * 10).astype(int)\n endPos = ((agent.pos + agent.actualV) * 10).astype(int)\n endPosDV = ((agent.pos + agent.desiredV) * 10).astype(int)\n pygame.draw.circle(screen, agent.color, scPos, AGENTSIZE,\n AGENTTHICKNESS)\n pygame.draw.circle(screen, agent.subgroupColor, scPos, 5, 3)\n pygame.draw.line(screen, agent.color, scPos, endPos, 2)\n pygame.draw.line(screen, [255, 60, 0], scPos, endPosDV, 2)\n pygame.display.flip()\n clock.tick(20)\n",
"step-5": "# -*-coding:utf-8-*-\n# Author: Scott Larter\n\nimport pygame\nimport pygame.draw\nimport numpy as np\nfrom agent import *\nfrom tools import *\n\n\nSCREENSIZE = [1200, 400] # walls.csv\n#SCREENSIZE = [1200, 650] # walls2.csv\nRESOLUTION = 180\nAGENTSNUM = 12\nGROUPSNUM = 2\nMAXGROUPSIZE = 6\nMAXSUBGROUPSIZE = 3\nBACKGROUNDCOLOR = [255, 255, 255]\nLINECOLOR = [255,0,0]\nAGENTSIZE = 9\nAGENTTHICKNESS = 3\nWALLSFILE = \"walls.csv\"\n\npygame.init()\nscreen = pygame.display.set_mode(SCREENSIZE)\npygame.display.set_caption('Social Force Model - Crosswalk')\nclock = pygame.time.Clock()\n\n# initialize walls\nwalls = []\nfor line in open(WALLSFILE, newline='', encoding=\"utf-8-sig\"):\n coords = line.split(\",\")\n wall = []\n wall.append(float(coords[0]))\n wall.append(float(coords[1]))\n wall.append(float(coords[2]))\n wall.append(float(coords[3]))\n walls.append(wall)\n\n\n# initialize agents\nagents = []\n\nfor n in range(AGENTSNUM):\n group_id = (int)(n / MAXGROUPSIZE)\n subgroup_id = (int)((n % MAXGROUPSIZE) / MAXSUBGROUPSIZE)\n\n if n % MAXGROUPSIZE == 0:\n agents.append([])\n\n if n % MAXSUBGROUPSIZE == 0:\n agents[group_id].append([])\n\n agent = Agent(n, group_id, subgroup_id)\n agents[group_id][subgroup_id].append(agent)\n\n\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.MOUSEBUTTONDOWN:\n (mouseX, mouseY) = pygame.mouse.get_pos()\n\n screen.fill(BACKGROUNDCOLOR)\n\n # draw walls\n for wall in walls:\n startPos = np.array([wall[0],wall[1]])\n endPos = np.array([wall[2],wall[3]])\n startPx = startPos*10 #worldCoord2ScreenCoord(startPos,SCREENSIZE,RESOLUTION)\n endPx = endPos*10 #worldCoord2ScreenCoord(endPos,SCREENSIZE,RESOLUTION)\n pygame.draw.line(screen, LINECOLOR, startPx.astype(int), endPx.astype(int))\n\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n agent.direction = normalize(agent.dest - agent.pos)\n agent.desiredV = 
agent.desiredSpeed * agent.direction\n\n adapt = agent.adaptVel()\n\n # initial forces values\n peopleInter = 0.0\n wallInter = 0.0\n groupVis = 0.0\n groupAtt = 0.0\n ownGroupRep = 0.0\n otherGroupRep = 0.0\n\n # wall interaction\n for wall in walls:\n wallInter += agent.wallInteraction(wall)\n\n # people interaction\n for groupj in agents:\n for subgroupj in groupj:\n for agentj in subgroupj:\n\n if agent.agentId != agentj.agentId:\n peopleInter += agent.peopleInteraction(agentj)\n\n # list of group members excluding current ped\n agentGroup = []\n for sub in group:\n for mem in sub:\n if mem.agentId != agent.agentId:\n agentGroup.append(mem)\n\n # group visual and attraction forces\n if len(agentGroup) > 0:\n groupVis = agent.groupVisual(agentGroup)\n groupAtt = agent.groupAttraction(agentGroup + [agent])\n\n # same group repulsion\n for agentj in agentGroup:\n ownGroupRep += agent.ownGroupRepulsion(agentj)\n\n groupInter = groupVis + groupAtt + ownGroupRep\n\n # other groups repulsion\n for gid,g in enumerate(agents):\n if gid != agent.groupId:\n # create list of 'other group' members\n otherGroup = []\n for sub in g:\n otherGroup += sub\n\n otherGroupRep += agent.otherGroupRepulsion(otherGroup)\n\n #print(otherGroupRep)\n\n # subgroup forces\n subgroupForce = agent.subgroupForces(group)\n\n sumForce = adapt + wallInter + peopleInter + groupInter# + otherGroupRep + subgroupForce\n\n accl = sumForce / agent.mass\n\n agent.actualV = agent.actualV + accl*0.5 # consider dt = 0.5\n\n agent.pos = agent.pos + agent.actualV*0.5\n\n if (np.linalg.norm(agent.pos - agent.dest) < 2) & (agent.Goal == 0):\n agent.Goal = 1\n agent.timeOut = pygame.time.get_ticks()\n #agent.timeOut = clock.get_time()/1000.0\n print('Agent ', agent.agentId, 'reached goal at ', agent.timeOut)\n\n for group in agents:\n for subgroup in group:\n for agent in subgroup:\n scPos = (agent.pos*10).astype(int) #worldCoord2ScreenCoord(agent.pos, SCREENSIZE, RESOLUTION)\n endPos = ((agent.pos + 
agent.actualV) * 10).astype(int)\n endPosDV = ((agent.pos + agent.desiredV) * 10).astype(int)\n\n pygame.draw.circle(screen, agent.color, scPos, AGENTSIZE, AGENTTHICKNESS)\n pygame.draw.circle(screen, agent.subgroupColor, scPos, 5, 3)\n pygame.draw.line(screen, agent.color, scPos, endPos, 2)\n pygame.draw.line(screen, [255,60,0], scPos, endPosDV, 2)\n\n pygame.display.flip()\n clock.tick(20)\n #clock.get_time",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from datetime import datetime
class Location:
    """A physical location that accumulates events and tracks average values."""

    def __init__(self, location_dict):
        """Initialise from a dict carrying 'x', 'y' and 'id' keys."""
        self.x = location_dict['x']
        self.y = location_dict['y']
        self.id = location_dict['id']
        self.events = []
        self.latest_average_value = 0
        self.latest_event_count = 0
        self.average_value_at_time_dict = {}
        self.overall_average_value = 0

    def update_average_values_at_time(self, time_to_calculate):
        """Average the values of events at the given minute and record it.

        The result is stored both in ``latest_average_value`` and under a
        formatted-timestamp key in ``average_value_at_time_dict``.
        """
        matching_values = [event.value for event in self.events
                           if event.time_rounded_to_minute == time_to_calculate]
        self.latest_event_count = len(matching_values)
        self.latest_average_value = 0
        if self.latest_event_count > 0:
            self.latest_average_value = sum(matching_values) / self.latest_event_count
        # NOTE(review): the +3600 looks like a fixed UTC+1 offset baked into
        # the displayed timestamp -- confirm the intended timezone handling.
        formatted_time = datetime.strftime(
            datetime.utcfromtimestamp(time_to_calculate + 3600),
            "%d/%m/%Y %H:%M:%S")
        self.average_value_at_time_dict[formatted_time] = self.latest_average_value

    def update_overall_average_value(self):
        """Recompute the mean of all event values seen so far.

        Leaves ``overall_average_value`` untouched when there are no events.
        """
        if self.events:
            total = sum(event.value for event in self.events)
            self.overall_average_value = total / len(self.events)
|
normal
|
{
"blob_id": "efbfe95acbe0b97e863c8788bca4a71633da36b3",
"index": 1906,
"step-1": "<mask token>\n\n\nclass Location:\n <mask token>\n <mask token>\n\n def update_overall_average_value(self):\n value_sum = 0\n for event in self.events:\n value_sum += event.value\n value_count = len(self.events)\n if value_count > 0:\n self.overall_average_value = value_sum / value_count\n",
"step-2": "<mask token>\n\n\nclass Location:\n\n def __init__(self, location_dict):\n self.x = location_dict['x']\n self.y = location_dict['y']\n self.id = location_dict['id']\n self.events = []\n self.latest_average_value = 0\n self.latest_event_count = 0\n self.average_value_at_time_dict = {}\n self.overall_average_value = 0\n <mask token>\n\n def update_overall_average_value(self):\n value_sum = 0\n for event in self.events:\n value_sum += event.value\n value_count = len(self.events)\n if value_count > 0:\n self.overall_average_value = value_sum / value_count\n",
"step-3": "<mask token>\n\n\nclass Location:\n\n def __init__(self, location_dict):\n self.x = location_dict['x']\n self.y = location_dict['y']\n self.id = location_dict['id']\n self.events = []\n self.latest_average_value = 0\n self.latest_event_count = 0\n self.average_value_at_time_dict = {}\n self.overall_average_value = 0\n\n def update_average_values_at_time(self, time_to_calculate):\n self.latest_event_count = 0\n sum_of_values = 0\n for event in self.events:\n if event.time_rounded_to_minute == time_to_calculate:\n sum_of_values += event.value\n self.latest_event_count += 1\n self.latest_average_value = 0\n if self.latest_event_count > 0:\n self.latest_average_value = sum_of_values / self.latest_event_count\n formatted_time = datetime.strftime(datetime.utcfromtimestamp(\n time_to_calculate + 3600), '%d/%m/%Y %H:%M:%S')\n self.average_value_at_time_dict[formatted_time\n ] = self.latest_average_value\n\n def update_overall_average_value(self):\n value_sum = 0\n for event in self.events:\n value_sum += event.value\n value_count = len(self.events)\n if value_count > 0:\n self.overall_average_value = value_sum / value_count\n",
"step-4": "from datetime import datetime\n\n\nclass Location:\n\n def __init__(self, location_dict):\n self.x = location_dict['x']\n self.y = location_dict['y']\n self.id = location_dict['id']\n self.events = []\n self.latest_average_value = 0\n self.latest_event_count = 0\n self.average_value_at_time_dict = {}\n self.overall_average_value = 0\n\n def update_average_values_at_time(self, time_to_calculate):\n self.latest_event_count = 0\n sum_of_values = 0\n for event in self.events:\n if event.time_rounded_to_minute == time_to_calculate:\n sum_of_values += event.value\n self.latest_event_count += 1\n self.latest_average_value = 0\n if self.latest_event_count > 0:\n self.latest_average_value = sum_of_values / self.latest_event_count\n formatted_time = datetime.strftime(datetime.utcfromtimestamp(\n time_to_calculate + 3600), '%d/%m/%Y %H:%M:%S')\n self.average_value_at_time_dict[formatted_time\n ] = self.latest_average_value\n\n def update_overall_average_value(self):\n value_sum = 0\n for event in self.events:\n value_sum += event.value\n value_count = len(self.events)\n if value_count > 0:\n self.overall_average_value = value_sum / value_count\n",
"step-5": "from datetime import datetime\n\n\nclass Location:\n\n def __init__(self, location_dict):\n self.x = location_dict['x']\n self.y = location_dict['y']\n self.id = location_dict['id']\n\n self.events = []\n\n self.latest_average_value = 0\n self.latest_event_count = 0\n self.average_value_at_time_dict = {}\n self.overall_average_value = 0\n\n def update_average_values_at_time(self, time_to_calculate):\n self.latest_event_count = 0\n sum_of_values = 0\n for event in self.events:\n if event.time_rounded_to_minute == time_to_calculate:\n # remove event from self.events\n # remove event id from event_id_set in main\n sum_of_values += event.value\n self.latest_event_count += 1\n self.latest_average_value = 0\n if self.latest_event_count > 0:\n self.latest_average_value = sum_of_values / self.latest_event_count\n\n formatted_time = datetime.strftime(datetime.utcfromtimestamp(time_to_calculate + 3600), \"%d/%m/%Y %H:%M:%S\")\n self.average_value_at_time_dict[formatted_time] = self.latest_average_value\n\n def update_overall_average_value(self):\n value_sum = 0\n for event in self.events:\n value_sum += event.value\n value_count = len(self.events)\n if value_count > 0:\n self.overall_average_value = value_sum / value_count\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def generate_mutation(base):
    """Return a random nucleotide different from *base*.

    Raises an Exception when *base* is not one of A/C/G/T.
    """
    if base not in ['A', 'C', 'G', 'T']:
        raise Exception('base is not a proper DNA nucleotide (ACGT).')
    alternatives = [b for b in ['A', 'C', 'G', 'T'] if b != base]
    return np.random.choice(alternatives)
def introduce_random_mutations(vntr, m):
    """Return a copy of *vntr* with *m* SNPs placed at random sites.

    Params
    ------
    - vntr, the DNA copy sequence which is copied.
    - m, the number of SNP mutations that will be randomly introduced.

    Returns
    -------
    A single copy of the VNTR sequence with m mutations.
    """
    # Pick m distinct positions to mutate.
    mutation_sites = np.random.choice(range(len(vntr)), m, replace=False)
    return ''.join(
        generate_mutation(base) if position in mutation_sites else base
        for position, base in enumerate(vntr)
    )
def introduce_specific_mutations(vntr, sites, mutations):
    """Return *vntr* with the given *mutations* substituted at *sites*.

    Params
    ------
    - vntr, the DNA copy sequence which is copied.
    - sites, locus where the SNP mutation will be introduced.
    - mutations, a list of mutations.

    Returns
    -------
    A single copy of the VNTR sequence with mutations at the specified sites.

    Raises an Exception when sites/mutations lengths differ, or when a
    requested "mutation" equals the nucleotide already at that site.
    """
    if len(sites) != len(mutations):
        raise Exception('The number of sites and mutations do not correspond.')
    result = list(vntr)
    for position, nucleotide in enumerate(result):
        if position not in sites:
            continue
        replacement = mutations[sites.index(position)]
        if nucleotide == replacement:
            raise Exception(
                'Not a mutation. The current site is {}. The current '.
                format(position) +
                'nucleotide is {}. Please use a different nucleotide '.
                format(nucleotide) + 'for this site.')
        result[position] = replacement
    return ''.join(result)
<|reserved_special_token_0|>
def write_sequence(fn, rlen, sequence, sequence_name='seq1', write_mode='w'):
    """Write *sequence* to *fn* as a FASTA record with lines of *rlen* chars.

    Params
    ------
    - fn, output file path.
    - rlen, maximum number of characters per FASTA body line.
    - sequence, the DNA sequence to write.
    - sequence_name, FASTA header written as '>name'.
    - write_mode, open() mode; pass 'a' to append multiple records.
    """
    with open(fn, write_mode) as f:
        f.write('>{}\n'.format(sequence_name))
        # Integer division: '/' yields a float in Python 3 and range()
        # would raise TypeError on it.
        div = len(sequence) // rlen
        for i in range(div):
            f.write('{}\n'.format(sequence[i * rlen:(i + 1) * rlen]))
        f.write('{}\n'.format(sequence[div * rlen:]))
<|reserved_special_token_0|>
def critical_copy_number(rlen, clen):
    """
    Determines the minimum number of VNTR copies needed
    so a read can be completely mapped inside of a VNTR.

    Params
    ------
    - rlen, read length in bases.
    - clen, length of one VNTR copy in bases.

    Raises an Exception when a single copy is longer than the read.
    """
    if rlen < clen:
        raise Exception('clen is larger than rlen.')
    elif rlen % clen > 0:
        return int(math.ceil(float(rlen) / clen))
    else:
        # Integer division: '/' returns a float in Python 3 and the
        # result is later used as a range() bound.
        return 1 + rlen // clen
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate_mutation(base):
"""
Taking into account the current base, base, return a mutation.
"""
if base in ['A', 'C', 'G', 'T']:
bases = ['A', 'C', 'G', 'T']
bases.remove(base)
return np.random.choice(bases)
else:
raise Exception('base is not a proper DNA nucleotide (ACGT).')
def introduce_random_mutations(vntr, m):
"""
Generate a VNTR sequence with random mutations. The mutations will be the same across different copies.
Params
------
- vntr, the DNA copy sequence which is copied.
- m, the number of SNP mutations that will be randomly introduced.
Returns
-------
A single copy of the VNTR sequence with m mutations. """
mutation_sites = np.random.choice(range(len(vntr)), m, replace=False)
m_vntr = []
for site, nucleotide in enumerate(vntr):
if site in mutation_sites:
m_vntr.append(generate_mutation(nucleotide))
else:
m_vntr.append(nucleotide)
return ''.join(m_vntr)
def introduce_specific_mutations(vntr, sites, mutations):
"""
Generate a VNTR sequence with the specified mutations at the specified sites.
Params
------
- vntr, the DNA copy sequence which is copied.
- sites, locus where the SNP mutation will be introduced.
- mutations, a list of mutations.
Returns
-------
A single copy of the VNTR sequence with mutations at the specified sites.
"""
if len(sites) != len(mutations):
raise Exception('The number of sites and mutations do not correspond.')
m_vntr = list(vntr)
for site, nucleotide in enumerate(m_vntr):
if site in sites:
mut_idx = sites.index(site)
if nucleotide == mutations[mut_idx]:
raise Exception(
'Not a mutation. The current site is {}. The current '.
format(site) +
'nucleotide is {}. Please use a different nucleotide '.
format(nucleotide) + 'for this site.')
else:
m_vntr[site] = mutations[mut_idx]
return ''.join(m_vntr)
<|reserved_special_token_0|>
def generate_sequence_with_vntr(sequence, loc, vntr):
    """Return *sequence* with *vntr* inserted at index *loc*."""
    return sequence[:loc] + vntr + sequence[loc:]
<|reserved_special_token_0|>
def write_sequence(fn, rlen, sequence, sequence_name='seq1', write_mode='w'):
    """Write *sequence* to *fn* as a FASTA record with lines of *rlen* chars.

    Params
    ------
    - fn, output file path.
    - rlen, maximum number of characters per FASTA body line.
    - sequence, the DNA sequence to write.
    - sequence_name, FASTA header written as '>name'.
    - write_mode, open() mode; pass 'a' to append multiple records.
    """
    with open(fn, write_mode) as f:
        f.write('>{}\n'.format(sequence_name))
        # Integer division: '/' yields a float in Python 3 and range()
        # would raise TypeError on it.
        div = len(sequence) // rlen
        for i in range(div):
            f.write('{}\n'.format(sequence[i * rlen:(i + 1) * rlen]))
        f.write('{}\n'.format(sequence[div * rlen:]))
<|reserved_special_token_0|>
def critical_copy_number(rlen, clen):
    """
    Determines the minimum number of VNTR copies needed
    so a read can be completely mapped inside of a VNTR.

    Params
    ------
    - rlen, read length in bases.
    - clen, length of one VNTR copy in bases.

    Raises an Exception when a single copy is longer than the read.
    """
    if rlen < clen:
        raise Exception('clen is larger than rlen.')
    elif rlen % clen > 0:
        return int(math.ceil(float(rlen) / clen))
    else:
        # Integer division: '/' returns a float in Python 3 and the
        # result is later used as a range() bound.
        return 1 + rlen // clen
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
subprocess.call('mkdir -p {}'.format(output_dir), shell=True)
<|reserved_special_token_0|>
parser.add_argument('len', metavar='seqLen', type=int, help=
'The length of the sequences.')
parser.add_argument('vntr', metavar='VNTR', type=str, help=
'The VNTR that will be introduced.', default=
'GCACGCTGCTGTGTAGTGGAGAAAGGGCAGGCAGCGAGCAAGCGTGTACAAGGTATATACGTGCC')
parser.add_argument('numVNTR', metavar='numVNTR', type=int, help=
'The number of VNTR copies that will be introduced.')
parser.add_argument('numMuts', metavar='numMuts', type=int, help=
'The number of mutations per copy.')
parser.add_argument('--mutation_type', metavar='mutType', type=str, choices
=['individual_random_mutations', 'group_random_mutations',
'specific_mutations'], default='individual_random_mutations', help=
'Copies of the VNTR can different mutations. Specify ' +
'mutation_type to simulate different mutational ' +
"""events in the VNTR copies.
""" + 'Choices:\n' +
'individual_random_mutations,\n' + 'group_random_mutations, and\n' +
'specific_mutations.')
parser.add_argument('--rlen', metavar='read length', type=int, help=
'The size of the output sequences.', default=150)
parser.add_argument('--loc', metavar='locus', type=int, help=
'The location where the snps are inserted.')
parser.add_argument('--outer_pad', action='store_true', help=
'Adds a padding around the VNTR for visual aid.', default=False)
parser.add_argument('--inner_pad', action='store_true', help=
'Adds a padding between copies of the VNTR for visual aid.', default=False)
parser.add_argument('-o', metavar='outputPrefix', type=str, help=
'The prefix of the output filename.')
parser.add_argument('--gen_ref', action='store_true', help=
'Generate a reference file as well which has a single copy of the VNTR.')
<|reserved_special_token_0|>
def generate_mutation(base):
"""
Taking into account the current base, base, return a mutation.
"""
if base in ['A', 'C', 'G', 'T']:
bases = ['A', 'C', 'G', 'T']
bases.remove(base)
return np.random.choice(bases)
else:
raise Exception('base is not a proper DNA nucleotide (ACGT).')
def introduce_random_mutations(vntr, m):
"""
Generate a VNTR sequence with random mutations. The mutations will be the same across different copies.
Params
------
- vntr, the DNA copy sequence which is copied.
- m, the number of SNP mutations that will be randomly introduced.
Returns
-------
A single copy of the VNTR sequence with m mutations. """
mutation_sites = np.random.choice(range(len(vntr)), m, replace=False)
m_vntr = []
for site, nucleotide in enumerate(vntr):
if site in mutation_sites:
m_vntr.append(generate_mutation(nucleotide))
else:
m_vntr.append(nucleotide)
return ''.join(m_vntr)
def introduce_specific_mutations(vntr, sites, mutations):
"""
Generate a VNTR sequence with the specified mutations at the specified sites.
Params
------
- vntr, the DNA copy sequence which is copied.
- sites, locus where the SNP mutation will be introduced.
- mutations, a list of mutations.
Returns
-------
A single copy of the VNTR sequence with mutations at the specified sites.
"""
if len(sites) != len(mutations):
raise Exception('The number of sites and mutations do not correspond.')
m_vntr = list(vntr)
for site, nucleotide in enumerate(m_vntr):
if site in sites:
mut_idx = sites.index(site)
if nucleotide == mutations[mut_idx]:
raise Exception(
'Not a mutation. The current site is {}. The current '.
format(site) +
'nucleotide is {}. Please use a different nucleotide '.
format(nucleotide) + 'for this site.')
else:
m_vntr[site] = mutations[mut_idx]
return ''.join(m_vntr)
<|reserved_special_token_0|>
if loc == None:
loc = args.len / 2
<|reserved_special_token_0|>
# Build the set of VNTR copies according to the requested mutation model:
# - individual_random_mutations: each copy gets its own independent SNPs
# - group_random_mutations: one mutated copy repeated numVNTR times
# - specific_mutations: fixed mutation set (site 0 -> 'C')
# NOTE(review): the specific_mutations branch yields a single string rather
# than a list of copies, so the join below would space-separate individual
# characters -- confirm this is intended.
if args.mutation_type == 'individual_random_mutations':
    new_vntr = []
    for i in range(args.numVNTR):
        new_vntr.append(introduce_random_mutations(vntr, args.numMuts))
elif args.mutation_type == 'group_random_mutations':
    new_vntr = [introduce_random_mutations(vntr, args.numMuts)] * args.numVNTR
elif args.mutation_type == 'specific_mutations':
    new_vntr = introduce_specific_mutations(vntr, [0], ['C'])
# Optional visual aids: inner_pad separates copies with a space,
# outer_pad surrounds the whole VNTR region with 10 spaces on each side.
if args.inner_pad == True:
    new_vntr = ' '.join(new_vntr)
else:
    new_vntr = ''.join(new_vntr)
if args.outer_pad == True:
    padding = ' ' * 10
    new_vntr = padding + new_vntr + padding
def generate_sequence_with_vntr(sequence, loc, vntr):
    """Return *sequence* with *vntr* inserted at index *loc*."""
    return sequence[:loc] + vntr + sequence[loc:]
<|reserved_special_token_0|>
subprocess.call('mkdir -p {}'.format(sample_dir), shell=True)
def write_sequence(fn, rlen, sequence, sequence_name='seq1', write_mode='w'):
    """Write *sequence* to *fn* as a FASTA record with lines of *rlen* chars.

    Params
    ------
    - fn, output file path.
    - rlen, maximum number of characters per FASTA body line.
    - sequence, the DNA sequence to write.
    - sequence_name, FASTA header written as '>name'.
    - write_mode, open() mode; pass 'a' to append multiple records.
    """
    with open(fn, write_mode) as f:
        f.write('>{}\n'.format(sequence_name))
        # Integer division: '/' yields a float in Python 3 and range()
        # would raise TypeError on it.
        div = len(sequence) // rlen
        for i in range(div):
            f.write('{}\n'.format(sequence[i * rlen:(i + 1) * rlen]))
        f.write('{}\n'.format(sequence[div * rlen:]))
if args.o != None:
write_sequence(args.o, args.rlen, n_sequence)
def critical_copy_number(rlen, clen):
    """
    Determines the minimum number of VNTR copies needed
    so a read can be completely mapped inside of a VNTR.

    Params
    ------
    - rlen, read length in bases.
    - clen, length of one VNTR copy in bases.

    Raises an Exception when a single copy is longer than the read.
    """
    if rlen < clen:
        raise Exception('clen is larger than rlen.')
    elif rlen % clen > 0:
        return int(math.ceil(float(rlen) / clen))
    else:
        # Integer division: '/' returns a float in Python 3 and the
        # result is later used as a range() bound.
        return 1 + rlen // clen
# Optionally emit a reference FASTA (one record per copy count, 0..ccn)
# plus BED files marking the VNTR and non-VNTR intervals, then index the
# FASTA with bwa.
if args.gen_ref:
    # Smallest copy count that lets a read map entirely inside the VNTR.
    ccn = critical_copy_number(args.rlen, len(vntr))
    # NOTE(review): num_seqs is computed but never used below -- confirm
    # whether it was meant to replace ccn.
    num_seqs = int(math.ceil(float(150) / len(vntr)))
    fn = args.o.replace('.fa', '_reference.fa')
    # Remove any stale output since records are appended one at a time.
    if os.path.exists(fn):
        os.remove(fn)
    for i in range(0, ccn + 1):
        # Record i carries i tandem copies of the VNTR inserted at loc.
        r_sequence = generate_sequence_with_vntr(sequence, loc, vntr * i)
        write_sequence(fn, args.rlen, r_sequence, sequence_name='seq{}'.
            format(i), write_mode='a')
    # BED of the VNTR interval [loc, loc + i*len(vntr)) in each record.
    bed_fn = args.o.replace('.fa', '_reference.bed')
    with open(bed_fn, 'w') as f:
        for i in range(0, ccn + 1):
            sequence_name = 'seq{}'.format(i)
            wrt = [sequence_name, loc, loc + len(vntr * i)]
            wrt = [str(x) for x in wrt]
            f.write('\t'.join(wrt) + '\n')
    # BED of the two flanking (non-VNTR) intervals in each record.
    bed_fn = args.o.replace('.fa', '_non_vntr_reference.bed')
    with open(bed_fn, 'w') as f:
        for i in range(0, ccn + 1):
            sequence_name = 'seq{}'.format(i)
            wrt = [sequence_name, 0, loc]
            wrt = [str(x) for x in wrt]
            f.write('\t'.join(wrt) + '\n')
            wrt = [sequence_name, loc + len(vntr * i), args.len + len(vntr * i)
                ]
            wrt = [str(x) for x in wrt]
            f.write('\t'.join(wrt) + '\n')
    # Build the bwa index so the reference is immediately mappable.
    subprocess.call('bwa index {}'.format(fn), shell=True)
<|reserved_special_token_1|>
import argparse
import os
import sys
import time
import numpy as np
import copy
import subprocess
import math
project_dir = os.path.join(sys.argv[0], '../../')
project_dir = os.path.abspath(project_dir)
output_dir = os.path.join(project_dir, 'output/', 'pipeline/', 'sample/')
subprocess.call('mkdir -p {}'.format(output_dir), shell=True)
parser = argparse.ArgumentParser(description=
'Generate a DNA sequence containing a VNTR sequence.')
parser.add_argument('len', metavar='seqLen', type=int, help=
'The length of the sequences.')
parser.add_argument('vntr', metavar='VNTR', type=str, help=
'The VNTR that will be introduced.', default=
'GCACGCTGCTGTGTAGTGGAGAAAGGGCAGGCAGCGAGCAAGCGTGTACAAGGTATATACGTGCC')
parser.add_argument('numVNTR', metavar='numVNTR', type=int, help=
'The number of VNTR copies that will be introduced.')
parser.add_argument('numMuts', metavar='numMuts', type=int, help=
'The number of mutations per copy.')
parser.add_argument('--mutation_type', metavar='mutType', type=str, choices
=['individual_random_mutations', 'group_random_mutations',
'specific_mutations'], default='individual_random_mutations', help=
'Copies of the VNTR can different mutations. Specify ' +
'mutation_type to simulate different mutational ' +
"""events in the VNTR copies.
""" + 'Choices:\n' +
'individual_random_mutations,\n' + 'group_random_mutations, and\n' +
'specific_mutations.')
parser.add_argument('--rlen', metavar='read length', type=int, help=
'The size of the output sequences.', default=150)
parser.add_argument('--loc', metavar='locus', type=int, help=
'The location where the snps are inserted.')
parser.add_argument('--outer_pad', action='store_true', help=
'Adds a padding around the VNTR for visual aid.', default=False)
parser.add_argument('--inner_pad', action='store_true', help=
'Adds a padding between copies of the VNTR for visual aid.', default=False)
parser.add_argument('-o', metavar='outputPrefix', type=str, help=
'The prefix of the output filename.')
parser.add_argument('--gen_ref', action='store_true', help=
'Generate a reference file as well which has a single copy of the VNTR.')
args = parser.parse_args()
def generate_mutation(base):
    """Return a random nucleotide different from *base*.

    Raises an Exception when *base* is not one of A, C, G or T.
    """
    if base not in ('A', 'C', 'G', 'T'):
        raise Exception('base is not a proper DNA nucleotide (ACGT).')
    alternatives = [nt for nt in ('A', 'C', 'G', 'T') if nt != base]
    return np.random.choice(alternatives)
def introduce_random_mutations(vntr, m):
    """Return a copy of *vntr* carrying *m* SNPs at randomly chosen sites.

    Params
    ------
    - vntr, the DNA copy sequence which is copied.
    - m, the number of SNP mutations that will be randomly introduced.

    Returns
    -------
    A single copy of the VNTR sequence with m mutations.
    """
    chosen = set(np.random.choice(range(len(vntr)), m, replace=False))
    mutated = [
        generate_mutation(nt) if idx in chosen else nt
        for idx, nt in enumerate(vntr)
    ]
    return ''.join(mutated)
def introduce_specific_mutations(vntr, sites, mutations):
    """Return *vntr* with the requested mutations applied at the given sites.

    Params
    ------
    - vntr, the DNA copy sequence which is copied.
    - sites, loci where the SNP mutations will be introduced.
    - mutations, a list of mutations (one per site).

    Returns
    -------
    A single copy of the VNTR sequence with mutations at the specified sites.
    """
    if len(sites) != len(mutations):
        raise Exception('The number of sites and mutations do not correspond.')
    result = list(vntr)
    for position, current in enumerate(result):
        if position not in sites:
            continue
        replacement = mutations[sites.index(position)]
        if current == replacement:
            raise Exception(
                'Not a mutation. The current site is {}. The current '.
                format(position) +
                'nucleotide is {}. Please use a different nucleotide '.
                format(current) + 'for this site.')
        result[position] = replacement
    return ''.join(result)
# SET the insert location, defaulting to the middle of the sequence.
loc = args.loc
if loc is None:
    # Floor division keeps loc usable as a slice index on Python 3,
    # where '/' would produce a float.
    loc = args.len // 2
# GENERATE the random background sequence.
sequence = ''.join(np.random.choice(['A', 'C', 'G', 'T'], size=args.len))
# MUTATE the vntr copies.
vntr = args.vntr
if args.mutation_type == 'individual_random_mutations':
    # Each copy receives its own independent set of random SNPs.
    new_vntr = []
    for i in range(args.numVNTR):
        new_vntr.append(introduce_random_mutations(vntr, args.numMuts))
elif args.mutation_type == 'group_random_mutations':
    # Every copy shares one randomly mutated sequence.
    new_vntr = [introduce_random_mutations(vntr, args.numMuts)] * args.numVNTR
elif args.mutation_type == 'specific_mutations':
    # Deprecated / incomplete: yields a single string, so the join below
    # operates on individual characters rather than whole copies.
    new_vntr = introduce_specific_mutations(vntr, [0], ['C'])
# INSERT inner padding between VNTR copies (visual aid only).
if args.inner_pad:
    new_vntr = ' '.join(new_vntr)
else:
    new_vntr = ''.join(new_vntr)
# INSERT outer padding around the VNTR (visual aid only).
if args.outer_pad:
    padding = ' ' * 10
    new_vntr = padding + new_vntr + padding
def generate_sequence_with_vntr(sequence, loc, vntr):
    """Insert *vntr* into *sequence* at index *loc* and return the result."""
    return sequence[:loc] + vntr + sequence[loc:]
# BUILD the final simulated sequence with the VNTR inserted at loc.
n_sequence = generate_sequence_with_vntr(sequence, loc, new_vntr)
# DERIVE a per-sample output directory from the output prefix.
# NOTE(review): args.o may be None (-o is optional) — os.path.split would
# then raise here; confirm callers always pass -o.
sample = os.path.split(args.o)[-1]
sample = sample.split('.')[0]
sample_dir = os.path.join(output_dir, sample)
subprocess.call('mkdir -p {}'.format(sample_dir), shell=True)
def write_sequence(fn, rlen, sequence, sequence_name='seq1', write_mode='w'):
    """Write *sequence* to *fn* in FASTA format, wrapped at *rlen* characters.

    Params
    ------
    - fn, output file name.
    - rlen, line width (read length) used to wrap the sequence.
    - sequence, the DNA sequence to write.
    - sequence_name, the FASTA header name.
    - write_mode, 'w' to overwrite or 'a' to append another record.
    """
    with open(fn, write_mode) as f:
        f.write('>{}\n'.format(sequence_name))
        # Floor division: plain '/' yields a float on Python 3 and breaks range().
        div = len(sequence) // rlen
        for i in range(div):
            f.write('{}\n'.format(sequence[i * rlen:(i + 1) * rlen]))
        # Trailing partial line (empty when rlen divides the length evenly).
        f.write('{}\n'.format(sequence[div * rlen:]))
# WRITE the full simulated sequence, but only when an output prefix was given.
# 'is not None' is the idiomatic identity test for the argparse default.
if args.o is not None:
    write_sequence(args.o, args.rlen, n_sequence)
def critical_copy_number(rlen, clen):
    """Return the minimum number of VNTR copies needed so a read can be
    completely mapped inside of a VNTR.

    Params
    ------
    - rlen, the read length.
    - clen, the length of a single VNTR copy.

    Raises an Exception when clen exceeds rlen.
    """
    if rlen < clen:
        raise Exception('clen is larger than rlen.')
    elif rlen % clen > 0:
        return int(math.ceil(float(rlen) / clen))
    else:
        # Floor division keeps the result an int on Python 3 as well.
        return 1 + rlen // clen
# WRITE a reference FASTA (seq{i} carries i VNTR copies, i = 0..ccn) plus
# BED files marking the VNTR and non-VNTR intervals, then bwa-index it.
if args.gen_ref:
    ccn = critical_copy_number(args.rlen, len(vntr))
    # NOTE(review): num_seqs is computed but never used below.
    num_seqs = int(math.ceil(float(150) / len(vntr)))
    fn = args.o.replace('.fa', '_reference.fa')
    if os.path.exists(fn):
        os.remove(fn)
    # Append one record per copy number; the unmutated vntr is repeated i times.
    for i in range(0, ccn + 1):
        r_sequence = generate_sequence_with_vntr(sequence, loc, vntr * i)
        write_sequence(fn, args.rlen, r_sequence, sequence_name='seq{}'.
            format(i), write_mode='a')
    # VNTR intervals: [loc, loc + i*len(vntr)) on each seq{i}.
    bed_fn = args.o.replace('.fa', '_reference.bed')
    with open(bed_fn, 'w') as f:
        for i in range(0, ccn + 1):
            sequence_name = 'seq{}'.format(i)
            wrt = [sequence_name, loc, loc + len(vntr * i)]
            wrt = [str(x) for x in wrt]
            f.write('\t'.join(wrt) + '\n')
    # Non-VNTR intervals: the flanks before and after the insert.
    bed_fn = args.o.replace('.fa', '_non_vntr_reference.bed')
    with open(bed_fn, 'w') as f:
        for i in range(0, ccn + 1):
            sequence_name = 'seq{}'.format(i)
            wrt = [sequence_name, 0, loc]
            wrt = [str(x) for x in wrt]
            f.write('\t'.join(wrt) + '\n')
            wrt = [sequence_name, loc + len(vntr * i), args.len + len(vntr * i)
                ]
            wrt = [str(x) for x in wrt]
            f.write('\t'.join(wrt) + '\n')
    # Requires bwa on PATH; failure is not checked.
    subprocess.call('bwa index {}'.format(fn), shell=True)
<|reserved_special_token_1|>
#! /home/joreyna/anaconda2/envs/hla/bin/python
import argparse
import os
import sys
import time
import numpy as np
import copy
import subprocess
import math
project_dir = os.path.join(sys.argv[0], '../../')
project_dir = os.path.abspath(project_dir)
output_dir = os.path.join(project_dir, 'output/', 'pipeline/', 'sample/')
subprocess.call('mkdir -p {}'.format(output_dir), shell=True)
# PARSING commandline arguments
parser = argparse.ArgumentParser(description='Generate a DNA sequence containing a VNTR sequence.')
parser.add_argument('len', metavar='seqLen', type=int, \
help='The length of the sequences.')
parser.add_argument('vntr', metavar='VNTR', type=str, \
help='The VNTR that will be introduced.',
default='GCACGCTGCTGTGTAGTGGAGAAAGGGCAGGCAGCGAGCAAGCGTGTACAAGGTATATACGTGCC')
parser.add_argument('numVNTR', metavar='numVNTR', type=int, \
help='The number of VNTR copies that will be introduced.')
parser.add_argument('numMuts', metavar='numMuts', type=int, \
help='The number of mutations per copy.')
parser.add_argument('--mutation_type', metavar='mutType', type=str, \
choices=['individual_random_mutations', 'group_random_mutations', 'specific_mutations'], \
default='individual_random_mutations',
help='Copies of the VNTR can different mutations. Specify ' + \
'mutation_type to simulate different mutational ' + \
'events in the VNTR copies.\n' + \
'Choices:\n' + \
'individual_random_mutations,\n' + \
'group_random_mutations, and\n' + \
'specific_mutations.')
parser.add_argument('--rlen', metavar='read length', type=int, \
help='The size of the output sequences.', default=150)
parser.add_argument('--loc', metavar='locus', type=int, \
help='The location where the snps are inserted.')
parser.add_argument('--outer_pad', action='store_true', \
help='Adds a padding around the VNTR for visual aid.', default=False)
parser.add_argument('--inner_pad', action='store_true', \
help='Adds a padding between copies of the VNTR for visual aid.', default=False)
parser.add_argument('-o', metavar='outputPrefix', type=str,
help='The prefix of the output filename.')
parser.add_argument('--gen_ref', action='store_true',
help='Generate a reference file as well which has a single copy of the VNTR.')
args = parser.parse_args()
## PRINTING commandline argument values
#print('\n')
#print('ArgParse Argument Values')
#print('--------------------')
#print('len: {}'.format(args.len))
#print('VNTR: {}'.format(args.vntr))
#print('VNTR copies: {}'.format(args.numVNTR))
#print('Mutations per VNTR copy: {}'.format(args.numMuts))
#print('Mutation Type: {}'.format(args.mutation_type))
#print('location: {}'.format(args.loc))
#print('outer pad: {}'.format(args.outer_pad))
#print('inner pad: {}'.format(args.inner_pad))
#print('output prefix: {}'.format(args.o))
#print('\n')
#
#
# DEFINING functions for generating random
# sequences with a VNTR insertion
def generate_mutation(base):
    """Return a random nucleotide different from *base*.

    Raises an Exception when *base* is not one of A, C, G or T.
    """
    if base not in ('A', 'C', 'G', 'T'):
        raise Exception('base is not a proper DNA nucleotide (ACGT).')
    alternatives = [nt for nt in ('A', 'C', 'G', 'T') if nt != base]
    return np.random.choice(alternatives)
def introduce_random_mutations(vntr, m):
    """Return a copy of *vntr* carrying *m* SNPs at randomly chosen sites.

    Params
    ------
    - vntr, the DNA copy sequence which is copied.
    - m, the number of SNP mutations that will be randomly introduced.

    Returns
    -------
    A single copy of the VNTR sequence with m mutations.
    """
    chosen = set(np.random.choice(range(len(vntr)), m, replace=False))
    mutated = [
        generate_mutation(nt) if idx in chosen else nt
        for idx, nt in enumerate(vntr)
    ]
    return ''.join(mutated)
def introduce_specific_mutations(vntr, sites, mutations):
    """Return *vntr* with the requested mutations applied at the given sites.

    Params
    ------
    - vntr, the DNA copy sequence which is copied.
    - sites, loci where the SNP mutations will be introduced.
    - mutations, a list of mutations (one per site).

    Returns
    -------
    A single copy of the VNTR sequence with mutations at the specified sites.
    """
    if len(sites) != len(mutations):
        raise Exception('The number of sites and mutations do not correspond.')
    result = list(vntr)
    for position, current in enumerate(result):
        if position not in sites:
            continue
        replacement = mutations[sites.index(position)]
        if current == replacement:
            raise Exception('Not a mutation. The current site is {}. The current '.format(position) +
                            'nucleotide is {}. Please use a different nucleotide '.format(current) +
                            'for this site.')
        result[position] = replacement
    return ''.join(result)
# SETTING a default value for the location
# of the insert size to the middle of the sequence
loc = args.loc
if loc is None:
    # Floor division keeps loc usable as a slice index on Python 3,
    # where '/' would produce a float.
    loc = args.len // 2
# GENERATE the random sequence
sequence = ''.join(np.random.choice(['A', 'C', 'G', 'T'], size=args.len))
# MUTATE the vntr copies.
vntr = args.vntr
if args.mutation_type == 'individual_random_mutations':
    # Each copy receives its own independent set of random SNPs.
    new_vntr = []
    for i in range(args.numVNTR):
        new_vntr.append(introduce_random_mutations(vntr, args.numMuts))
elif args.mutation_type == 'group_random_mutations':
    # Every copy shares one randomly mutated sequence.
    new_vntr = [introduce_random_mutations(vntr, args.numMuts)] * args.numVNTR
elif args.mutation_type == 'specific_mutations':
    # Deprecated / incomplete: yields a single string, so the join below
    # operates on individual characters rather than whole copies.
    new_vntr = introduce_specific_mutations(vntr, [0], ['C'])
# INSERT inner padding between VNTR copies (visual aid only).
if args.inner_pad:
    new_vntr = ' '.join(new_vntr)
else:
    new_vntr = ''.join(new_vntr)
# INSERT outer padding around the VNTR (visual aid only).
if args.outer_pad:
    padding = ' ' * 10
    new_vntr = padding + new_vntr + padding
# INSERT the VNTR into the sequence
def generate_sequence_with_vntr(sequence, loc, vntr):
    """Insert *vntr* into *sequence* at index *loc* and return the result."""
    return sequence[:loc] + vntr + sequence[loc:]
# BUILD the final simulated sequence with the VNTR inserted at loc.
n_sequence = generate_sequence_with_vntr(sequence, loc, new_vntr)
#print('Processed Variable Values')
#print('--------------------------')
#print('sequence: {}'.format(sequence))
#print('new_vntr: {}'.format(new_vntr))
#print('n_sequence: {}'.format(n_sequence))
#print('\n')
# MAKEDIR for the given sample
# NOTE(review): args.o may be None (-o is optional) — os.path.split would
# then raise here; confirm callers always pass -o.
sample = os.path.split(args.o)[-1]
sample = sample.split('.')[0]
sample_dir = os.path.join(output_dir, sample)
subprocess.call('mkdir -p {}'.format(sample_dir), shell=True)
# WRITE the sequence file
def write_sequence(fn, rlen, sequence, sequence_name='seq1', write_mode='w'):
    """Write *sequence* to *fn* in FASTA format, wrapped at *rlen* characters.

    Params
    ------
    - fn, output file name.
    - rlen, line width (read length) used to wrap the sequence.
    - sequence, the DNA sequence to write.
    - sequence_name, the FASTA header name.
    - write_mode, 'w' to overwrite or 'a' to append another record.
    """
    with open(fn, write_mode) as f:
        f.write('>{}\n'.format(sequence_name))
        # Floor division: plain '/' yields a float on Python 3 and breaks range().
        div = len(sequence) // rlen
        for i in range(div):
            f.write('{}\n'.format(sequence[i * rlen: (i + 1) * rlen]))
        # Trailing partial line (empty when rlen divides the length evenly).
        f.write('{}\n'.format(sequence[div * rlen:]))
# WRITE the full simulated sequence, but only when an output prefix was given.
# 'is not None' is the idiomatic identity test for the argparse default.
if args.o is not None:
    write_sequence(args.o, args.rlen, n_sequence)
# WRITE the reference file and bed file
def critical_copy_number(rlen, clen):
    """Return the minimum number of VNTR copies needed so a read can be
    completely mapped inside of a VNTR.

    Params
    ------
    - rlen, the read length.
    - clen, the length of a single VNTR copy.

    Raises an Exception when clen exceeds rlen.
    """
    if rlen < clen:
        raise Exception('clen is larger than rlen.')
    elif rlen % clen > 0:
        return int(math.ceil(float(rlen) / clen))
    else:
        # Floor division keeps the result an int on Python 3 as well.
        return 1 + rlen // clen
# WRITE a reference FASTA (seq{i} carries i VNTR copies, i = 0..ccn) plus
# BED files marking the VNTR and non-VNTR intervals, then bwa-index it.
if args.gen_ref:
    # CALCULATE the critical copy number
    ccn = critical_copy_number(args.rlen, len(vntr))
    # WRITE the reference file
    # NOTE(review): num_seqs is computed but never used below.
    num_seqs = int(math.ceil(float(150)/len(vntr)))
    fn = args.o.replace('.fa', '_reference.fa')
    if os.path.exists(fn): # REMOVE if already exists
        os.remove(fn)
    # Append one record per copy number; the unmutated vntr is repeated i times.
    for i in range(0, ccn + 1):
        r_sequence = generate_sequence_with_vntr(sequence, loc, vntr * i)
        write_sequence(fn, args.rlen, r_sequence, sequence_name='seq{}'.format(i), write_mode='a')
    # WRITE the bed file for VNTR and non-VNTR regions
    # VNTR intervals: [loc, loc + i*len(vntr)) on each seq{i}.
    bed_fn = args.o.replace('.fa', '_reference.bed')
    with open(bed_fn, 'w') as f:
        #print('read length: {}, vntr length: {}'.format(args.rlen, len(vntr)))
        #print('critical copy number: {}'.format(ccn))
        for i in range(0, ccn + 1):
            sequence_name='seq{}'.format(i)
            wrt = [sequence_name, loc, loc + len(vntr * i)]
            wrt = [str(x) for x in wrt]
            f.write('\t'.join(wrt) + '\n')
    # Non-VNTR intervals: the flanks before and after the insert.
    bed_fn = args.o.replace('.fa', '_non_vntr_reference.bed')
    with open(bed_fn, 'w') as f:
        #print('read length: {}, vntr length: {}'.format(args.rlen, len(vntr)))
        #print('critical copy number: {}'.format(ccn))
        for i in range(0, ccn + 1):
            sequence_name='seq{}'.format(i)
            wrt = [sequence_name, 0, loc]
            wrt = [str(x) for x in wrt]
            f.write('\t'.join(wrt) + '\n')
            wrt = [sequence_name, loc + len(vntr * i), args.len + len(vntr * i)]
            wrt = [str(x) for x in wrt]
            f.write('\t'.join(wrt) + '\n')
    # INDEX the reference file
    # Requires bwa on PATH; failure is not checked.
    subprocess.call('bwa index {}'.format(fn), shell=True)
|
flexible
|
{
"blob_id": "d3f80deb72ca2bd91fc09b49ad644f54d339f962",
"index": 5819,
"step-1": "<mask token>\n\n\ndef generate_mutation(base):\n \"\"\"\n\tTaking into account the current base, base, return a mutation.\n\t\n\t\"\"\"\n if base in ['A', 'C', 'G', 'T']:\n bases = ['A', 'C', 'G', 'T']\n bases.remove(base)\n return np.random.choice(bases)\n else:\n raise Exception('base is not a proper DNA nucleotide (ACGT).')\n\n\ndef introduce_random_mutations(vntr, m):\n \"\"\"\n\tGenerate a VNTR sequence with random mutations. The mutations will be the same across different copies. \n\t\n\tParams\n\t------\n\t\n\t- vntr, the DNA copy sequence which is copied. \n\t- m, the number of SNP mutations that will be randomly introduced. \n\t\n\tReturns\n\t-------\n\tA single copy of the VNTR sequence with m mutations. \t\"\"\"\n mutation_sites = np.random.choice(range(len(vntr)), m, replace=False)\n m_vntr = []\n for site, nucleotide in enumerate(vntr):\n if site in mutation_sites:\n m_vntr.append(generate_mutation(nucleotide))\n else:\n m_vntr.append(nucleotide)\n return ''.join(m_vntr)\n\n\ndef introduce_specific_mutations(vntr, sites, mutations):\n \"\"\"\n\tGenerate a VNTR sequence with the specified mutations at the specified sites. \n\t\n\tParams\n\t------\n\t\n\t- vntr, the DNA copy sequence which is copied. \n\t- sites, locus where the SNP mutation will be introduced. \n\t- mutations, a list of mutations.\n\t\n\tReturns\n\t-------\n\tA single copy of the VNTR sequence with mutations at the specified sites. \n\t\"\"\"\n if len(sites) != len(mutations):\n raise Exception('The number of sites and mutations do not correspond.')\n m_vntr = list(vntr)\n for site, nucleotide in enumerate(m_vntr):\n if site in sites:\n mut_idx = sites.index(site)\n if nucleotide == mutations[mut_idx]:\n raise Exception(\n 'Not a mutation. The current site is {}. The current '.\n format(site) +\n 'nucleotide is {}. 
Please use a different nucleotide '.\n format(nucleotide) + 'for this site.')\n else:\n m_vntr[site] = mutations[mut_idx]\n return ''.join(m_vntr)\n\n\n<mask token>\n\n\ndef write_sequence(fn, rlen, sequence, sequence_name='seq1', write_mode='w'):\n with open(fn, write_mode) as f:\n f.write('>{}\\n'.format(sequence_name))\n div = len(sequence) / rlen\n fasta_seq = []\n for i in range(div):\n f.write('{}\\n'.format(sequence[i * rlen:(i + 1) * rlen]))\n f.write('{}\\n'.format(sequence[div * rlen:]))\n\n\n<mask token>\n\n\ndef critical_copy_number(rlen, clen):\n \"\"\"\n Determines the minimum number of VNTR copies needed \n so a read can be completely mapped inside of a VNTR.\n \"\"\"\n if rlen < clen:\n raise Exception('clen is larger than rlen.')\n elif rlen % clen > 0:\n return int(math.ceil(float(rlen) / clen))\n else:\n return 1 + rlen / clen\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_mutation(base):\n \"\"\"\n\tTaking into account the current base, base, return a mutation.\n\t\n\t\"\"\"\n if base in ['A', 'C', 'G', 'T']:\n bases = ['A', 'C', 'G', 'T']\n bases.remove(base)\n return np.random.choice(bases)\n else:\n raise Exception('base is not a proper DNA nucleotide (ACGT).')\n\n\ndef introduce_random_mutations(vntr, m):\n \"\"\"\n\tGenerate a VNTR sequence with random mutations. The mutations will be the same across different copies. \n\t\n\tParams\n\t------\n\t\n\t- vntr, the DNA copy sequence which is copied. \n\t- m, the number of SNP mutations that will be randomly introduced. \n\t\n\tReturns\n\t-------\n\tA single copy of the VNTR sequence with m mutations. \t\"\"\"\n mutation_sites = np.random.choice(range(len(vntr)), m, replace=False)\n m_vntr = []\n for site, nucleotide in enumerate(vntr):\n if site in mutation_sites:\n m_vntr.append(generate_mutation(nucleotide))\n else:\n m_vntr.append(nucleotide)\n return ''.join(m_vntr)\n\n\ndef introduce_specific_mutations(vntr, sites, mutations):\n \"\"\"\n\tGenerate a VNTR sequence with the specified mutations at the specified sites. \n\t\n\tParams\n\t------\n\t\n\t- vntr, the DNA copy sequence which is copied. \n\t- sites, locus where the SNP mutation will be introduced. \n\t- mutations, a list of mutations.\n\t\n\tReturns\n\t-------\n\tA single copy of the VNTR sequence with mutations at the specified sites. \n\t\"\"\"\n if len(sites) != len(mutations):\n raise Exception('The number of sites and mutations do not correspond.')\n m_vntr = list(vntr)\n for site, nucleotide in enumerate(m_vntr):\n if site in sites:\n mut_idx = sites.index(site)\n if nucleotide == mutations[mut_idx]:\n raise Exception(\n 'Not a mutation. The current site is {}. The current '.\n format(site) +\n 'nucleotide is {}. 
Please use a different nucleotide '.\n format(nucleotide) + 'for this site.')\n else:\n m_vntr[site] = mutations[mut_idx]\n return ''.join(m_vntr)\n\n\n<mask token>\n\n\ndef generate_sequence_with_vntr(sequence, loc, vntr):\n nseq = sequence[0:loc]\n nseq += vntr\n nseq += sequence[loc:]\n return nseq\n\n\n<mask token>\n\n\ndef write_sequence(fn, rlen, sequence, sequence_name='seq1', write_mode='w'):\n with open(fn, write_mode) as f:\n f.write('>{}\\n'.format(sequence_name))\n div = len(sequence) / rlen\n fasta_seq = []\n for i in range(div):\n f.write('{}\\n'.format(sequence[i * rlen:(i + 1) * rlen]))\n f.write('{}\\n'.format(sequence[div * rlen:]))\n\n\n<mask token>\n\n\ndef critical_copy_number(rlen, clen):\n \"\"\"\n Determines the minimum number of VNTR copies needed \n so a read can be completely mapped inside of a VNTR.\n \"\"\"\n if rlen < clen:\n raise Exception('clen is larger than rlen.')\n elif rlen % clen > 0:\n return int(math.ceil(float(rlen) / clen))\n else:\n return 1 + rlen / clen\n\n\n<mask token>\n",
"step-3": "<mask token>\nsubprocess.call('mkdir -p {}'.format(output_dir), shell=True)\n<mask token>\nparser.add_argument('len', metavar='seqLen', type=int, help=\n 'The length of the sequences.')\nparser.add_argument('vntr', metavar='VNTR', type=str, help=\n 'The VNTR that will be introduced.', default=\n 'GCACGCTGCTGTGTAGTGGAGAAAGGGCAGGCAGCGAGCAAGCGTGTACAAGGTATATACGTGCC')\nparser.add_argument('numVNTR', metavar='numVNTR', type=int, help=\n 'The number of VNTR copies that will be introduced.')\nparser.add_argument('numMuts', metavar='numMuts', type=int, help=\n 'The number of mutations per copy.')\nparser.add_argument('--mutation_type', metavar='mutType', type=str, choices\n =['individual_random_mutations', 'group_random_mutations',\n 'specific_mutations'], default='individual_random_mutations', help=\n 'Copies of the VNTR can different mutations. Specify ' +\n 'mutation_type to simulate different mutational ' +\n \"\"\"events in the VNTR copies.\n\"\"\" + 'Choices:\\n' +\n 'individual_random_mutations,\\n' + 'group_random_mutations, and\\n' +\n 'specific_mutations.')\nparser.add_argument('--rlen', metavar='read length', type=int, help=\n 'The size of the output sequences.', default=150)\nparser.add_argument('--loc', metavar='locus', type=int, help=\n 'The location where the snps are inserted.')\nparser.add_argument('--outer_pad', action='store_true', help=\n 'Adds a padding around the VNTR for visual aid.', default=False)\nparser.add_argument('--inner_pad', action='store_true', help=\n 'Adds a padding between copies of the VNTR for visual aid.', default=False)\nparser.add_argument('-o', metavar='outputPrefix', type=str, help=\n 'The prefix of the output filename.')\nparser.add_argument('--gen_ref', action='store_true', help=\n 'Generate a reference file as well which has a single copy of the VNTR.')\n<mask token>\n\n\ndef generate_mutation(base):\n \"\"\"\n\tTaking into account the current base, base, return a mutation.\n\t\n\t\"\"\"\n if base in ['A', 'C', 'G', 
'T']:\n bases = ['A', 'C', 'G', 'T']\n bases.remove(base)\n return np.random.choice(bases)\n else:\n raise Exception('base is not a proper DNA nucleotide (ACGT).')\n\n\ndef introduce_random_mutations(vntr, m):\n \"\"\"\n\tGenerate a VNTR sequence with random mutations. The mutations will be the same across different copies. \n\t\n\tParams\n\t------\n\t\n\t- vntr, the DNA copy sequence which is copied. \n\t- m, the number of SNP mutations that will be randomly introduced. \n\t\n\tReturns\n\t-------\n\tA single copy of the VNTR sequence with m mutations. \t\"\"\"\n mutation_sites = np.random.choice(range(len(vntr)), m, replace=False)\n m_vntr = []\n for site, nucleotide in enumerate(vntr):\n if site in mutation_sites:\n m_vntr.append(generate_mutation(nucleotide))\n else:\n m_vntr.append(nucleotide)\n return ''.join(m_vntr)\n\n\ndef introduce_specific_mutations(vntr, sites, mutations):\n \"\"\"\n\tGenerate a VNTR sequence with the specified mutations at the specified sites. \n\t\n\tParams\n\t------\n\t\n\t- vntr, the DNA copy sequence which is copied. \n\t- sites, locus where the SNP mutation will be introduced. \n\t- mutations, a list of mutations.\n\t\n\tReturns\n\t-------\n\tA single copy of the VNTR sequence with mutations at the specified sites. \n\t\"\"\"\n if len(sites) != len(mutations):\n raise Exception('The number of sites and mutations do not correspond.')\n m_vntr = list(vntr)\n for site, nucleotide in enumerate(m_vntr):\n if site in sites:\n mut_idx = sites.index(site)\n if nucleotide == mutations[mut_idx]:\n raise Exception(\n 'Not a mutation. The current site is {}. The current '.\n format(site) +\n 'nucleotide is {}. 
Please use a different nucleotide '.\n format(nucleotide) + 'for this site.')\n else:\n m_vntr[site] = mutations[mut_idx]\n return ''.join(m_vntr)\n\n\n<mask token>\nif loc == None:\n loc = args.len / 2\n<mask token>\nif args.mutation_type == 'individual_random_mutations':\n new_vntr = []\n for i in range(args.numVNTR):\n new_vntr.append(introduce_random_mutations(vntr, args.numMuts))\nelif args.mutation_type == 'group_random_mutations':\n new_vntr = [introduce_random_mutations(vntr, args.numMuts)] * args.numVNTR\nelif args.mutation_type == 'specific_mutations':\n new_vntr = introduce_specific_mutations(vntr, [0], ['C'])\nif args.inner_pad == True:\n new_vntr = ' '.join(new_vntr)\nelse:\n new_vntr = ''.join(new_vntr)\nif args.outer_pad == True:\n padding = ' ' * 10\n new_vntr = padding + new_vntr + padding\n\n\ndef generate_sequence_with_vntr(sequence, loc, vntr):\n nseq = sequence[0:loc]\n nseq += vntr\n nseq += sequence[loc:]\n return nseq\n\n\n<mask token>\nsubprocess.call('mkdir -p {}'.format(sample_dir), shell=True)\n\n\ndef write_sequence(fn, rlen, sequence, sequence_name='seq1', write_mode='w'):\n with open(fn, write_mode) as f:\n f.write('>{}\\n'.format(sequence_name))\n div = len(sequence) / rlen\n fasta_seq = []\n for i in range(div):\n f.write('{}\\n'.format(sequence[i * rlen:(i + 1) * rlen]))\n f.write('{}\\n'.format(sequence[div * rlen:]))\n\n\nif args.o != None:\n write_sequence(args.o, args.rlen, n_sequence)\n\n\ndef critical_copy_number(rlen, clen):\n \"\"\"\n Determines the minimum number of VNTR copies needed \n so a read can be completely mapped inside of a VNTR.\n \"\"\"\n if rlen < clen:\n raise Exception('clen is larger than rlen.')\n elif rlen % clen > 0:\n return int(math.ceil(float(rlen) / clen))\n else:\n return 1 + rlen / clen\n\n\nif args.gen_ref:\n ccn = critical_copy_number(args.rlen, len(vntr))\n num_seqs = int(math.ceil(float(150) / len(vntr)))\n fn = args.o.replace('.fa', '_reference.fa')\n if os.path.exists(fn):\n os.remove(fn)\n 
for i in range(0, ccn + 1):\n r_sequence = generate_sequence_with_vntr(sequence, loc, vntr * i)\n write_sequence(fn, args.rlen, r_sequence, sequence_name='seq{}'.\n format(i), write_mode='a')\n bed_fn = args.o.replace('.fa', '_reference.bed')\n with open(bed_fn, 'w') as f:\n for i in range(0, ccn + 1):\n sequence_name = 'seq{}'.format(i)\n wrt = [sequence_name, loc, loc + len(vntr * i)]\n wrt = [str(x) for x in wrt]\n f.write('\\t'.join(wrt) + '\\n')\n bed_fn = args.o.replace('.fa', '_non_vntr_reference.bed')\n with open(bed_fn, 'w') as f:\n for i in range(0, ccn + 1):\n sequence_name = 'seq{}'.format(i)\n wrt = [sequence_name, 0, loc]\n wrt = [str(x) for x in wrt]\n f.write('\\t'.join(wrt) + '\\n')\n wrt = [sequence_name, loc + len(vntr * i), args.len + len(vntr * i)\n ]\n wrt = [str(x) for x in wrt]\n f.write('\\t'.join(wrt) + '\\n')\n subprocess.call('bwa index {}'.format(fn), shell=True)\n",
"step-4": "import argparse\nimport os\nimport sys\nimport time\nimport numpy as np\nimport copy\nimport subprocess\nimport math\nproject_dir = os.path.join(sys.argv[0], '../../')\nproject_dir = os.path.abspath(project_dir)\noutput_dir = os.path.join(project_dir, 'output/', 'pipeline/', 'sample/')\nsubprocess.call('mkdir -p {}'.format(output_dir), shell=True)\nparser = argparse.ArgumentParser(description=\n 'Generate a DNA sequence containing a VNTR sequence.')\nparser.add_argument('len', metavar='seqLen', type=int, help=\n 'The length of the sequences.')\nparser.add_argument('vntr', metavar='VNTR', type=str, help=\n 'The VNTR that will be introduced.', default=\n 'GCACGCTGCTGTGTAGTGGAGAAAGGGCAGGCAGCGAGCAAGCGTGTACAAGGTATATACGTGCC')\nparser.add_argument('numVNTR', metavar='numVNTR', type=int, help=\n 'The number of VNTR copies that will be introduced.')\nparser.add_argument('numMuts', metavar='numMuts', type=int, help=\n 'The number of mutations per copy.')\nparser.add_argument('--mutation_type', metavar='mutType', type=str, choices\n =['individual_random_mutations', 'group_random_mutations',\n 'specific_mutations'], default='individual_random_mutations', help=\n 'Copies of the VNTR can different mutations. 
Specify ' +\n 'mutation_type to simulate different mutational ' +\n \"\"\"events in the VNTR copies.\n\"\"\" + 'Choices:\\n' +\n 'individual_random_mutations,\\n' + 'group_random_mutations, and\\n' +\n 'specific_mutations.')\nparser.add_argument('--rlen', metavar='read length', type=int, help=\n 'The size of the output sequences.', default=150)\nparser.add_argument('--loc', metavar='locus', type=int, help=\n 'The location where the snps are inserted.')\nparser.add_argument('--outer_pad', action='store_true', help=\n 'Adds a padding around the VNTR for visual aid.', default=False)\nparser.add_argument('--inner_pad', action='store_true', help=\n 'Adds a padding between copies of the VNTR for visual aid.', default=False)\nparser.add_argument('-o', metavar='outputPrefix', type=str, help=\n 'The prefix of the output filename.')\nparser.add_argument('--gen_ref', action='store_true', help=\n 'Generate a reference file as well which has a single copy of the VNTR.')\nargs = parser.parse_args()\n\n\ndef generate_mutation(base):\n \"\"\"\n\tTaking into account the current base, base, return a mutation.\n\t\n\t\"\"\"\n if base in ['A', 'C', 'G', 'T']:\n bases = ['A', 'C', 'G', 'T']\n bases.remove(base)\n return np.random.choice(bases)\n else:\n raise Exception('base is not a proper DNA nucleotide (ACGT).')\n\n\ndef introduce_random_mutations(vntr, m):\n \"\"\"\n\tGenerate a VNTR sequence with random mutations. The mutations will be the same across different copies. \n\t\n\tParams\n\t------\n\t\n\t- vntr, the DNA copy sequence which is copied. \n\t- m, the number of SNP mutations that will be randomly introduced. \n\t\n\tReturns\n\t-------\n\tA single copy of the VNTR sequence with m mutations. 
\t\"\"\"\n mutation_sites = np.random.choice(range(len(vntr)), m, replace=False)\n m_vntr = []\n for site, nucleotide in enumerate(vntr):\n if site in mutation_sites:\n m_vntr.append(generate_mutation(nucleotide))\n else:\n m_vntr.append(nucleotide)\n return ''.join(m_vntr)\n\n\ndef introduce_specific_mutations(vntr, sites, mutations):\n \"\"\"\n\tGenerate a VNTR sequence with the specified mutations at the specified sites. \n\t\n\tParams\n\t------\n\t\n\t- vntr, the DNA copy sequence which is copied. \n\t- sites, locus where the SNP mutation will be introduced. \n\t- mutations, a list of mutations.\n\t\n\tReturns\n\t-------\n\tA single copy of the VNTR sequence with mutations at the specified sites. \n\t\"\"\"\n if len(sites) != len(mutations):\n raise Exception('The number of sites and mutations do not correspond.')\n m_vntr = list(vntr)\n for site, nucleotide in enumerate(m_vntr):\n if site in sites:\n mut_idx = sites.index(site)\n if nucleotide == mutations[mut_idx]:\n raise Exception(\n 'Not a mutation. The current site is {}. The current '.\n format(site) +\n 'nucleotide is {}. 
Please use a different nucleotide '.\n format(nucleotide) + 'for this site.')\n else:\n m_vntr[site] = mutations[mut_idx]\n return ''.join(m_vntr)\n\n\nloc = args.loc\nif loc == None:\n loc = args.len / 2\nsequence = ''.join(np.random.choice(['A', 'C', 'G', 'T'], size=args.len))\nvntr = args.vntr\nif args.mutation_type == 'individual_random_mutations':\n new_vntr = []\n for i in range(args.numVNTR):\n new_vntr.append(introduce_random_mutations(vntr, args.numMuts))\nelif args.mutation_type == 'group_random_mutations':\n new_vntr = [introduce_random_mutations(vntr, args.numMuts)] * args.numVNTR\nelif args.mutation_type == 'specific_mutations':\n new_vntr = introduce_specific_mutations(vntr, [0], ['C'])\nif args.inner_pad == True:\n new_vntr = ' '.join(new_vntr)\nelse:\n new_vntr = ''.join(new_vntr)\nif args.outer_pad == True:\n padding = ' ' * 10\n new_vntr = padding + new_vntr + padding\n\n\ndef generate_sequence_with_vntr(sequence, loc, vntr):\n nseq = sequence[0:loc]\n nseq += vntr\n nseq += sequence[loc:]\n return nseq\n\n\nn_sequence = generate_sequence_with_vntr(sequence, loc, new_vntr)\nsample = os.path.split(args.o)[-1]\nsample = sample.split('.')[0]\nsample_dir = os.path.join(output_dir, sample)\nsubprocess.call('mkdir -p {}'.format(sample_dir), shell=True)\n\n\ndef write_sequence(fn, rlen, sequence, sequence_name='seq1', write_mode='w'):\n with open(fn, write_mode) as f:\n f.write('>{}\\n'.format(sequence_name))\n div = len(sequence) / rlen\n fasta_seq = []\n for i in range(div):\n f.write('{}\\n'.format(sequence[i * rlen:(i + 1) * rlen]))\n f.write('{}\\n'.format(sequence[div * rlen:]))\n\n\nif args.o != None:\n write_sequence(args.o, args.rlen, n_sequence)\n\n\ndef critical_copy_number(rlen, clen):\n \"\"\"\n Determines the minimum number of VNTR copies needed \n so a read can be completely mapped inside of a VNTR.\n \"\"\"\n if rlen < clen:\n raise Exception('clen is larger than rlen.')\n elif rlen % clen > 0:\n return int(math.ceil(float(rlen) / 
clen))\n else:\n return 1 + rlen / clen\n\n\nif args.gen_ref:\n ccn = critical_copy_number(args.rlen, len(vntr))\n num_seqs = int(math.ceil(float(150) / len(vntr)))\n fn = args.o.replace('.fa', '_reference.fa')\n if os.path.exists(fn):\n os.remove(fn)\n for i in range(0, ccn + 1):\n r_sequence = generate_sequence_with_vntr(sequence, loc, vntr * i)\n write_sequence(fn, args.rlen, r_sequence, sequence_name='seq{}'.\n format(i), write_mode='a')\n bed_fn = args.o.replace('.fa', '_reference.bed')\n with open(bed_fn, 'w') as f:\n for i in range(0, ccn + 1):\n sequence_name = 'seq{}'.format(i)\n wrt = [sequence_name, loc, loc + len(vntr * i)]\n wrt = [str(x) for x in wrt]\n f.write('\\t'.join(wrt) + '\\n')\n bed_fn = args.o.replace('.fa', '_non_vntr_reference.bed')\n with open(bed_fn, 'w') as f:\n for i in range(0, ccn + 1):\n sequence_name = 'seq{}'.format(i)\n wrt = [sequence_name, 0, loc]\n wrt = [str(x) for x in wrt]\n f.write('\\t'.join(wrt) + '\\n')\n wrt = [sequence_name, loc + len(vntr * i), args.len + len(vntr * i)\n ]\n wrt = [str(x) for x in wrt]\n f.write('\\t'.join(wrt) + '\\n')\n subprocess.call('bwa index {}'.format(fn), shell=True)\n",
"step-5": "#! /home/joreyna/anaconda2/envs/hla/bin/python \nimport argparse \nimport os\nimport sys\nimport time \nimport numpy as np\nimport copy\nimport subprocess\nimport math\n\nproject_dir = os.path.join(sys.argv[0], '../../')\nproject_dir = os.path.abspath(project_dir)\noutput_dir = os.path.join(project_dir, 'output/', 'pipeline/', 'sample/')\nsubprocess.call('mkdir -p {}'.format(output_dir), shell=True)\n\n# PARSING commandline arguments \nparser = argparse.ArgumentParser(description='Generate a DNA sequence containing a VNTR sequence.')\nparser.add_argument('len', metavar='seqLen', type=int, \\\n\t\thelp='The length of the sequences.')\nparser.add_argument('vntr', metavar='VNTR', type=str, \\\n\t\thelp='The VNTR that will be introduced.', \n\t\tdefault='GCACGCTGCTGTGTAGTGGAGAAAGGGCAGGCAGCGAGCAAGCGTGTACAAGGTATATACGTGCC')\nparser.add_argument('numVNTR', metavar='numVNTR', type=int, \\\n\t\thelp='The number of VNTR copies that will be introduced.')\nparser.add_argument('numMuts', metavar='numMuts', type=int, \\\n\t\thelp='The number of mutations per copy.')\nparser.add_argument('--mutation_type', metavar='mutType', type=str, \\\n\t\tchoices=['individual_random_mutations', 'group_random_mutations', 'specific_mutations'], \\\n\t\tdefault='individual_random_mutations',\n\t\thelp='Copies of the VNTR can different mutations. 
Specify ' + \\\n\t\t\t'mutation_type to simulate different mutational ' + \\\n\t\t\t'events in the VNTR copies.\\n' + \\\n\t\t\t'Choices:\\n' + \\\n\t\t\t'individual_random_mutations,\\n' + \\\n\t\t\t'group_random_mutations, and\\n' + \\\n\t\t\t'specific_mutations.')\nparser.add_argument('--rlen', metavar='read length', type=int, \\\n\t\thelp='The size of the output sequences.', default=150)\nparser.add_argument('--loc', metavar='locus', type=int, \\\n\t\thelp='The location where the snps are inserted.')\nparser.add_argument('--outer_pad', action='store_true', \\\n\t\thelp='Adds a padding around the VNTR for visual aid.', default=False)\nparser.add_argument('--inner_pad', action='store_true', \\\n\t\thelp='Adds a padding between copies of the VNTR for visual aid.', default=False)\nparser.add_argument('-o', metavar='outputPrefix', type=str, \n\t\thelp='The prefix of the output filename.')\nparser.add_argument('--gen_ref', action='store_true', \n\t\thelp='Generate a reference file as well which has a single copy of the VNTR.')\nargs = parser.parse_args()\n\n\n## PRINTING commandline argument values \n#print('\\n')\n#print('ArgParse Argument Values')\n#print('--------------------')\n#print('len: {}'.format(args.len))\n#print('VNTR: {}'.format(args.vntr))\n#print('VNTR copies: {}'.format(args.numVNTR))\n#print('Mutations per VNTR copy: {}'.format(args.numMuts))\n#print('Mutation Type: {}'.format(args.mutation_type))\n#print('location: {}'.format(args.loc))\n#print('outer pad: {}'.format(args.outer_pad))\n#print('inner pad: {}'.format(args.inner_pad))\n#print('output prefix: {}'.format(args.o))\n#print('\\n')\n#\n#\n# DEFINING functions for generating random \n# sequences with a VNTR insertion\ndef generate_mutation(base):\n\t\"\"\"\n\tTaking into account the current base, base, return a mutation.\n\t\n\t\"\"\"\n\tif base in ['A', 'C', 'G', 'T']:\n\t\tbases = ['A', 'C', 'G', 'T']\n\t\tbases.remove(base)\n\t\treturn np.random.choice(bases)\n\telse:\n\t\traise 
Exception('base is not a proper DNA nucleotide (ACGT).')\n\n\ndef introduce_random_mutations(vntr, m):\n\t\"\"\"\n\tGenerate a VNTR sequence with random mutations. The mutations will be the same across different copies. \n\t\n\tParams\n\t------\n\t\n\t- vntr, the DNA copy sequence which is copied. \n\t- m, the number of SNP mutations that will be randomly introduced. \n\t\n\tReturns\n\t-------\n\tA single copy of the VNTR sequence with m mutations. \\\n\t\"\"\"\n\t\n\tmutation_sites = np.random.choice(range(len(vntr)), m, replace=False)\n\tm_vntr = []\n\tfor site, nucleotide in enumerate(vntr):\n\t\tif site in mutation_sites:\n\t\t\tm_vntr.append(generate_mutation(nucleotide))\n\t\telse:\n\t\t\tm_vntr.append(nucleotide)\n\treturn ''.join(m_vntr)\n\n\ndef introduce_specific_mutations(vntr, sites, mutations):\n\t\"\"\"\n\tGenerate a VNTR sequence with the specified mutations at the specified sites. \n\t\n\tParams\n\t------\n\t\n\t- vntr, the DNA copy sequence which is copied. \n\t- sites, locus where the SNP mutation will be introduced. \n\t- mutations, a list of mutations.\n\t\n\tReturns\n\t-------\n\tA single copy of the VNTR sequence with mutations at the specified sites. \n\t\"\"\"\n\t\n\tif len(sites) != len(mutations):\n\t\traise Exception('The number of sites and mutations do not correspond.')\n\tm_vntr = list(vntr)\n\tfor site, nucleotide in enumerate(m_vntr):\n\t\tif site in sites:\n\t\t\tmut_idx = sites.index(site)\n\t\t\tif nucleotide == mutations[mut_idx]:\n\t\t\t\traise Exception('Not a mutation. The current site is {}. The current '.format(site) + \\\n\t\t\t\t\t'nucleotide is {}. 
Please use a different nucleotide '.format(nucleotide) + \\\n\t\t\t\t\t'for this site.')\n\t\t\telse:\n\t\t\t\tm_vntr[site] = mutations[mut_idx]\n\treturn ''.join(m_vntr)\n\n\n\n# SETTING a default value for the location \n# of the insert size to the middle of the sequence \nloc = args.loc\nif loc == None:\n\tloc = args.len / 2 \n\n# GENERATE the random sequence \nsequence = ''.join(np.random.choice(['A', 'C', 'G', 'T'], size=args.len))\n\n# MUTATE the vntr copies.\nvntr = args.vntr\nif args.mutation_type == 'individual_random_mutations':\n\t# Testing incomplete \n\tnew_vntr = []\n\tfor i in range(args.numVNTR):\n\t\tnew_vntr.append(introduce_random_mutations(vntr, args.numMuts))\n\nelif args.mutation_type == 'group_random_mutations':\n\t# Testing incomplete \n\tnew_vntr = [introduce_random_mutations(vntr, args.numMuts)] * args.numVNTR \n\t\nelif args.mutation_type == 'specific_mutations': \n\t# Deprecated. Coding incomplete.\n\tnew_vntr = introduce_specific_mutations(vntr, [0], ['C'])\n\n# INSERT inner padding between VNTR copies\nif args.inner_pad == True:\n\tnew_vntr = ' '.join(new_vntr)\nelse:\n\tnew_vntr = ''.join(new_vntr)\n\n# INSERT outer padding around the VNTR\nif args.outer_pad == True:\n\tpadding = ' ' * 10\n\tnew_vntr = padding + new_vntr + padding\n\n# INSERT the VNTR into the sequence \ndef generate_sequence_with_vntr(sequence, loc, vntr):\n\tnseq = sequence[0:loc]\n\tnseq += vntr \n\tnseq += sequence[loc:]\n\treturn nseq \nn_sequence = generate_sequence_with_vntr(sequence, loc, new_vntr)\n\n#print('Processed Variable Values')\n#print('--------------------------')\n#print('sequence: {}'.format(sequence))\n#print('new_vntr: {}'.format(new_vntr))\n#print('n_sequence: {}'.format(n_sequence))\n#print('\\n')\n\n# MAKEDIR for the given sample \nsample = os.path.split(args.o)[-1]\nsample = sample.split('.')[0]\nsample_dir = os.path.join(output_dir, sample)\nsubprocess.call('mkdir -p {}'.format(sample_dir), shell=True)\n\n# WRITE the sequence file \ndef 
write_sequence(fn, rlen, sequence, sequence_name='seq1', write_mode='w'):\n\twith open(fn, write_mode) as f:\n\t\tf.write('>{}\\n'.format(sequence_name))\n\t\tdiv = len(sequence) / rlen\n\t\tfasta_seq = []\n\t\tfor i in range(div):\n\t\t\tf.write('{}\\n'.format(sequence[i * rlen: (i + 1) * rlen]))\n\t\tf.write('{}\\n'.format(sequence[div * rlen:]))\nif args.o != None:\n\twrite_sequence(args.o, args.rlen, n_sequence)\n\n\n \n\n# WRITE the reference file and bed file \ndef critical_copy_number(rlen, clen):\n \"\"\"\n Determines the minimum number of VNTR copies needed \n so a read can be completely mapped inside of a VNTR.\n \"\"\"\n \n if rlen < clen: \n raise Exception('clen is larger than rlen.')\n elif rlen % clen > 0:\n return int(math.ceil(float(rlen) / clen))\n else:\n return 1 + (rlen/clen)\n\nif args.gen_ref:\n\n\t# CALCULATE the critical copy number \n\tccn = critical_copy_number(args.rlen, len(vntr))\n\n\t# WRITE the reference file\n\tnum_seqs = int(math.ceil(float(150)/len(vntr)))\n\tfn = args.o.replace('.fa', '_reference.fa')\n\tif os.path.exists(fn): # REMOVE if already exists \n\t\tos.remove(fn)\n\n\tfor i in range(0, ccn + 1):\n\t\tr_sequence = generate_sequence_with_vntr(sequence, loc, vntr * i)\n\t\twrite_sequence(fn, args.rlen, r_sequence, sequence_name='seq{}'.format(i), write_mode='a')\n\n\t# WRITE the bed file for VNTR and non-VNTR regions \n\tbed_fn = args.o.replace('.fa', '_reference.bed')\n\twith open(bed_fn, 'w') as f:\n\n\t\t#print('read length: {}, vntr length: {}'.format(args.rlen, len(vntr)))\n\n\n\t\t#print('critical copy number: {}'.format(ccn))\n\n\t\tfor i in range(0, ccn + 1):\n\t\t\tsequence_name='seq{}'.format(i)\n\t\t\twrt = [sequence_name, loc, loc + len(vntr * i)]\n\t\t\twrt = [str(x) for x in wrt]\n\t\t\tf.write('\\t'.join(wrt) + '\\n')\n\n\tbed_fn = args.o.replace('.fa', '_non_vntr_reference.bed')\n\twith open(bed_fn, 'w') as f:\n\n\t\t#print('read length: {}, vntr length: {}'.format(args.rlen, 
len(vntr)))\n\t\t#print('critical copy number: {}'.format(ccn))\n\n\t\tfor i in range(0, ccn + 1):\n\t\t\tsequence_name='seq{}'.format(i)\n\n\t\t\twrt = [sequence_name, 0, loc]\n\t\t\twrt = [str(x) for x in wrt]\n\t\t\tf.write('\\t'.join(wrt) + '\\n')\n\n\t\t\twrt = [sequence_name, loc + len(vntr * i), args.len + len(vntr * i)]\n\t\t\twrt = [str(x) for x in wrt]\n\t\t\tf.write('\\t'.join(wrt) + '\\n')\n\n\n\t# INDEX the reference file\n\tsubprocess.call('bwa index {}'.format(fn), shell=True)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
5,
6,
7,
9,
10
]
}
|
[
5,
6,
7,
9,
10
] |
# -*- coding:utf-8 -*-
__author__ = 'leandro'
from datetime import *
from PyQt4 import QtGui, QtCore
from baseDatos.ventas.venta import NotaCredito
from gui import CRUDWidget,MdiWidget
from ventanas import Ui_vtnDevolucionDeCliente, Ui_vtnReintegroCliente, Ui_vtnVentaContado
from baseDatos.obraSocial import ObraSocial as ObraSocialModel
from baseDatos.productos import Producto as ProductoModel
from baseDatos.productos import Medicamento as MedicamentoModel
from baseDatos.productos import Monodroga as MonodrogaModel
from baseDatos.obraSocial import Descuento as DescuentoModel
from baseDatos.productos import Lote as LoteModel
from baseDatos.productos import LoteProducto as LoteProductoModel
from baseDatos.ventas import Factura as FacturaModel
from baseDatos.ventas import DetalleFactura as DetalleFacturaModel
from baseDatos.ventas import NotaCredito as NotaCreditoModel
from baseDatos.ventas import DetalleNotaCredito as DetalleNCModel
from baseDatos.ventas import CobroCliente as CobroClienteModel
from genComprobantes import generarNotaCredito,generarFactura
from validarDatos import ValidarDatos
from ventanas import Ui_Dialog
from gui.signals import PoolOfWindows
class DevolucionDeCliente(CRUDWidget, Ui_vtnDevolucionDeCliente):
    """
    Models the "customer return" (Devolucion al Cliente) workflow:
    look up an invoice, collect the returned items lot by lot, and
    generate and print the matching credit note.
    """
    # Days after emission during which an invoice may still be returned.
    plazo = 7
    def __init__(self,mdi):
        MdiWidget.__init__(self, mdi)
        self.sesion = self.mdi().window().getSesionBD()
        self.validadores()
        self.btnBuscar.pressed.connect(self.buscarFactura)
        self.tableFactura.doubleClicked.connect(self.devolverDetalle)
        self.btnAceptar.pressed.connect(self.confirmarOperacion)
        self.btnCancelar.pressed.connect(self.cancelarOperacion)
        self.lineNumero.returnPressed.connect(self.buscarFactura)
        self.facturaSeleccionada = None  # invoice currently loaded, if any
        self.notaCredito = None
        self.productosSeleccionados = 0  # count of detail lines fully returned
        self.detallesDevueltos = {}  # invoice line number -> returned detail object
        self.lotesDevueltos = {}
        self.data = {}  # credit-note line data, used later when printing
    def validadores(self):
        # Attach the shared input validators to the required fields.
        camposRequeridos = [getattr(self,"lineNumero")]
        ValidarDatos.setValidador(camposRequeridos)
    def buscarFactura(self):
        """
        Looks up the invoice whose number was typed in and, if it is
        eligible for a return (exists, has no credit note yet, is inside
        the `plazo`-day window and is not settled with the social-security
        organization), loads its detail lines into the table.
        :return:
        """
        if not self.lineNumero.isEnabled() and self.facturaSeleccionada != None:
            QtGui.QMessageBox.information(self,"Aviso","Ya se ha seleccionado una factura")
        elif not self.lineNumero.isEnabled():
            # Field disabled but no invoice kept: re-enable and reset the view.
            self.lineNumero.setEnabled(True)
            self.lineNumero.clear()
            self.limpiarTabla(self.tableFactura)
        else:
            self.numeroFacturaActual=str(self.lineNumero.text())
            if len(self.numeroFacturaActual)==0:
                QtGui.QMessageBox.information(self,"Aviso",QtCore.QString.fromUtf8("No se ha ingresado número de factura"))
            else:
                self.facturaSeleccionada=FacturaModel.existeFactura(int(self.numeroFacturaActual),self.sesion)
                if self.facturaSeleccionada==None:
                    QtGui.QMessageBox.warning(self,"Aviso","La factura seleccionada no existe")
                elif self.facturaSeleccionada.getNC()!=None:
                    QtGui.QMessageBox.information(self,"Aviso",QtCore.QString.fromUtf8("La factura ya ha posee una Nota de Crédito"))
                    self.facturaSeleccionada = None
                elif self.facturaSeleccionada.getFechaEmision()+timedelta(days=int(self.plazo))<date.today():
                    QtGui.QMessageBox.information(self,"Aviso",QtCore.QString.fromUtf8("El tiempo permitido para la devolución ha expirado"))
                elif self.facturaSeleccionada.estaLiquidada(self.sesion):
                    # NOTE(review): leftover debug print — consider removing.
                    print self.facturaSeleccionada.estaLiquidada(self.sesion)
                    QtGui.QMessageBox.information(self,"Aviso","La factura se encuentra liquidada a la Obra Social")
                else:
                    # Lock the field so the selection cannot change mid-operation.
                    self.lineNumero.setEnabled(False)
                    self.cargarObjetos(self.tableFactura,self.facturaSeleccionada.getDetalles(self.sesion),
                                       ["nro_linea","producto","cantidad","importe"])
    def obtenerValoresItem(self,row):
        """
        Collects the cell values of one row of the invoice-details table.
        :param row: row number
        :return: list with the row's cell texts
        """
        values=[]
        for col in range(0,self.tableFactura.columnCount()):
            values.append(self.tableFactura.item(row,col).text())
        return values
    def armarItem(self,item,cantidad,key):
        """
        Builds and stores the credit-note row corresponding to a return.
        :param item: list with the invoice-detail row values
        :param cantidad: quantity returned
        :param key: line number of the returned invoice detail
        :return:
        """
        row=self.tableNC.rowCount()
        self.tableNC.insertRow(row)
        for col, elemento in enumerate(item[1:]):
            self.tableNC.setItem(row,col,QtGui.QTableWidgetItem(item[col+1]))
        self.tableNC.item(row,1).setText(str(cantidad))
        # Data about the added item, kept for printing the credit note later.
        self.data[key] = [str(item[1]),cantidad,0,float(item[3])]
    def devolverDetalle(self):
        """
        Adds the invoice detail selected by the user to the credit note,
        prompting for quantity and lot repeatedly until the whole detail
        has been accounted for, or the user cancels either dialog.
        :return:
        """
        rowActual=self.tableFactura.currentItem().row()
        signal = QtGui.QMessageBox.information(self,"Confirmación","¿Desea devolver este item?",\
                                            QtGui.QMessageBox.Close | QtGui.QMessageBox.Ok)
        if signal == QtGui.QMessageBox.Ok:
            producto = int(self.tableFactura.item(rowActual,1).text())
            cantidad_detalle = int(self.tableFactura.item(rowActual,2).text())
            linea = int(self.tableFactura.item(rowActual,0).text())
            nro_factura = int(self.lineNumero.text())
            detalle = FacturaModel.getDetalle(nro_factura,linea,self.sesion)
            lotes_detalle = detalle.devolverLotes(self.sesion)
            # NOTE(review): `temp` aliases `lotes_detalle` (no copy) — both
            # names track the remaining units per lot.
            temp = lotes_detalle
            finalize_actualizacion = False
            cantidad_restante = cantidad_detalle
            while not finalize_actualizacion:
                cantidad, ok = QtGui.QInputDialog.getInt(self,"Cantidad","Ingrese cantidad del producto",1,1,2000,5)
                if ok == False:
                    # User cancelled: restore the displayed quantity and stop.
                    finalize_actualizacion = True
                    self.tableFactura.item(rowActual,2).setText(str(cantidad_detalle))
                    break
                lote, ok=QtGui.QInputDialog.getText(self,"Lote","Ingrese lote")
                if ok == False:
                    finalize_actualizacion = True
                    self.tableFactura.item(rowActual,2).setText(str(cantidad_detalle))
                    break
                if not lote in lotes_detalle.keys():
                    QtGui.QMessageBox.information(self,"Aviso","El lote ingresado no es valido para este detalle")
                elif lotes_detalle[str(lote)] == 0:
                    QtGui.QMessageBox.information(self,"Aviso","Los productos de este lote ya han sido devueltos")
                elif cantidad > lotes_detalle[str(lote)]:
                    QtGui.QMessageBox.information(self,"Aviso","La cantidad ingresada es mayor a la esperada para este lote")
                else:
                    temp[str(lote)] -= cantidad
                    cantidad_restante -= cantidad
                    self.tableFactura.item(rowActual,2).setText(str(cantidad_restante))
                    # Once every lot reaches zero the whole line is returned.
                    if sum(map(lambda x: temp[x],temp)) == 0:
                        self.productosSeleccionados +=1
                        key = int(self.tableFactura.item(rowActual,0).text())
                        self.detallesDevueltos[key] = detalle
                        self.armarItem(self.obtenerValoresItem(rowActual),cantidad_detalle,key)
                        self.tableFactura.removeRow(rowActual)
                        finalize_actualizacion = True
    def limpiarVentana(self):
        """
        Clears the window's widgets back to their initial state.
        :return:
        """
        self.limpiarTabla(self.tableFactura)
        self.lineNumero.setEnabled(True)
        self.lineNumero.clear()
        self.limpiarTabla(self.tableNC)
    def calcularTotal(self):
        """
        Computes the total amount to refund on the credit note.
        :return: total to refund
        """
        subtotales=[]
        for row in range(0,self.tableNC.rowCount()):
            subtotales.append(float(self.tableNC.item(row,2).text()))
        return sum(subtotales)
    def confirmarOperacion(self):
        """
        Persists the credit note and its detail lines, marks the invoice
        as credited, and prints the credit note, once the user confirmed
        the operation.
        :return:
        """
        if self.productosSeleccionados != 0:
            nc = NotaCreditoModel(NotaCreditoModel.generarNumero(self.sesion))
            nc.guardar(self.sesion)
            for nro_lnc, nro_lfactura in enumerate(self.detallesDevueltos):
                detalle_nc = DetalleNCModel(nc.numero,nro_lnc+1,self.facturaSeleccionada.numero,nro_lfactura)
                detalle_nc.setImporte(self.data[nro_lfactura][3])
                detalle_nc.guardar(self.sesion)
                self.detallesDevueltos[nro_lfactura].devolver(self.sesion) # Returns the matching invoice detail
            self.facturaSeleccionada.setNC(nc.numero)
            self.facturaSeleccionada.modificar(self.sesion)
            QtGui.QMessageBox.information(self,"Aviso","La factura ha sido devuelta")
            self.objectModified.emit()
            # Cash refunds are announced only for single cash payments.
            cobros = self.facturaSeleccionada.getCobros(self.sesion)
            if len(cobros) == 1 and cobros[0].tipo == "Efectivo":
                QtGui.QMessageBox.information(self,"Devolucion","El importe en efectivo a entregar es de: $%.2f" % self.calcularTotal())
            # Build the dictionary with the data needed to print the credit note.
            data = {}
            data["numero"] = nc.numero
            data["fecha"] = nc.fecha_emision
            data["detalles"] = self.data.values()
            generarNotaCredito(data)
            self.facturaSeleccionada=None
            self.productosSeleccionados=0
            self.detallesDevueltos = {}
            self.limpiarVentana()
            self.data = {}
        else:
            QtGui.QMessageBox.information(self,"Devolucion Cliente","No se ha agregado ningun producto para devolver")
    def cancelarOperacion(self):
        """
        Asks for confirmation and, if granted, discards the in-progress
        return and clears the window.
        :return:
        """
        signal = QtGui.QMessageBox.warning(self,"Advertencia",QtCore.QString.fromUtf8("¿Desea cancelar la operación?"),\
                                        QtGui.QMessageBox.Close | QtGui.QMessageBox.Ok)
        if signal == QtGui.QMessageBox.Ok:
            self.data = {}
            self.facturaSeleccionada = None
            self.productosSeleccionados = 0
            self.detallesDevueltos = {}
            self.limpiarVentana()
    def cancelarVentana(self):
        # Unconditional reset used when the window itself is closed.
        self.data = {}
        self.facturaSeleccionada = None
        self.productosSeleccionados = 0
        self.detallesDevueltos = {}
        self.limpiarVentana()
class ReintegroCliente(CRUDWidget, Ui_vtnReintegroCliente):
    """
    Models the "customer reimbursement" (Reintegro al Cliente) workflow:
    pick a social-security organization, look up an invoice, and build a
    credit note for the discount amounts owed to the customer.
    """
    # Days after emission during which a reimbursement is still allowed.
    plazo = 7
    def __init__(self, mdi):
        MdiWidget.__init__(self, mdi)
        self.sesion = self.mdi().window().getSesionBD()
        self.cargarObras()
        self.validadores()
        self.btnBuscarOs.pressed.connect(self.buscarOs)
        self.tableOs.itemDoubleClicked.connect(self.obtenerObra)
        self.btnBuscarFac.pressed.connect(self.buscarFactura)
        self.lineRazon.returnPressed.connect(self.filtrarObra)
        self.lineCuit.returnPressed.connect(self.filtrarObra)
        self.lineNumeroFac.returnPressed.connect(self.buscarFactura)
        self.btnAceptar.pressed.connect(self.confirmarOperacion)
        self.btnCancelar.pressed.connect(self.cancelarOperacion)
        self.tableFactura.itemDoubleClicked.connect(self.agregarProducto)
        # Invoice and credit-note sections stay disabled until an
        # organization has been selected.
        self.gbFactura.setEnabled(False)
        self.gbNotaCredito.setEnabled(False)
        self.detallesReintegrables = []  # [nro_factura, line, discount, amount] per NC line
        self.detallesImprimibles = []    # [product, qty, discount, amount] for printing
        self.obraSocial = None           # razon_social of the selected organization
        self.facturaSeleccionada = None
    def filtrarObra(self):
        """
        Filters the social-security table according to the search
        criteria typed by the user (empty fields impose no restriction).
        :return:
        """
        razon_social = str(self.lineRazon.text())
        cuit = str(self.lineCuit.text())
        data = self.getAllTabla(self.tableOs)
        if razon_social != "":
            dataRazon = filter(lambda x: x[0].upper() == razon_social.upper(), data.values())
        else:
            dataRazon = data.values()
        if cuit != "":
            dataCuit = filter(lambda x: x[1].upper() == cuit.upper(), dataRazon)
        else:
            dataCuit = dataRazon
        # Unhide all rows first, then hide those filtered out.
        for dato in data:
            self.tableOs.setRowHidden(dato,False)
        for dato in data:
            if not data[dato] in dataCuit:
                self.tableOs.setRowHidden(dato,True)
    def cargarObras(self):
        """
        Loads every available social-security organization into the
        corresponding table.
        :return:
        """
        self.cargarObjetos(self.tableOs,
                           ObraSocialModel.buscarTodos("razon_social", self.sesion).all(),
                           ("razon_social", "cuit", "direccion")
                           )
    def validadores(self):
        """
        Attaches the appropriate validators to the window's input fields.
        :return:
        """
        camposRequeridos = [getattr(self,"lineRazon")]
        ValidarDatos.setValidador(camposRequeridos)
        camposRequeridos = [getattr(self,"lineCuit")]
        ValidarDatos.setValidador(camposRequeridos)
        camposRequeridos = [getattr(self,"lineNumeroFac")]
        ValidarDatos.setValidador(camposRequeridos)
    def buscarOs(self):
        """
        Searches for a social-security organization according to the
        user's criteria, or re-enables the search if nothing has been
        committed yet.
        :return:
        """
        if self.lineRazon.isEnabled():
            self.filtrarObra()
        elif not self.lineRazon.isEnabled() and (self.tableNC.rowCount() != 0 or self.tableFactura.rowCount() != 0):
            QtGui.QMessageBox.information(self,"Aviso","Imposible cambiar de Obra Social. Ya se ha seleccionado\
            una")
        else:
            self.gbNotaCredito.setEnabled(False)
            self.gbFactura.setEnabled(False)
            self.lineRazon.clear()
            self.lineRazon.setEnabled(True)
            self.lineCuit.clear()
            self.lineCuit.setEnabled(True)
            self.tableOs.setEnabled(True)
    def obtenerObra(self):
        """
        Copies the selected organization into the form fields and
        unlocks the invoice / credit-note sections.
        :return:
        """
        rowActual = self.tableOs.currentItem().row()
        self.lineRazon.setText(str(self.tableOs.item(rowActual,0).text()))
        self.lineRazon.setEnabled(False)
        self.obraSocial=str(self.tableOs.item(rowActual,0).text())
        self.lineCuit.setText(str(self.tableOs.item(rowActual,1).text()))
        self.lineCuit.setEnabled(False)
        self.tableOs.setEnabled(False)
        self.gbFactura.setEnabled(True)
        self.gbNotaCredito.setEnabled(True)
    def buscarFactura(self):
        """
        Looks up the invoice indicated by the user, validating that it
        exists, matches the selected organization, is inside the allowed
        window, is not settled and has no credit note. Notifies the user
        on any failed check.
        :return:
        """
        if not self.lineNumeroFac.isEnabled() and self.tableNC.rowCount() != 0:
            QtGui.QMessageBox.information(self,"Aviso","Ya se ha seleccionado una factura")
        elif not self.lineNumeroFac.isEnabled():
            self.lineNumeroFac.setEnabled(True)
            self.lineNumeroFac.clear()
            self.limpiarTabla(self.tableFactura)
        else:
            self.numeroFacturaActual=str(self.lineNumeroFac.text())
            if len(self.numeroFacturaActual)==0:
                self.showMsjEstado("No se ha ingresado numero de factura")
            else:
                self.facturaSeleccionada=FacturaModel.existeFactura(int(self.numeroFacturaActual),self.sesion)
                if self.facturaSeleccionada==None:
                    QtGui.QMessageBox.information(self,"Aviso","La factura seleccionada no existe")
                elif self.facturaSeleccionada.getObra() != None and self.facturaSeleccionada.getObra() != self.obraSocial:
                    QtGui.QMessageBox.information(self,"Aviso","La Obra Social seleccionada no corresponde con la factura")
                elif self.facturaSeleccionada.getFechaEmision()+timedelta(days=int(self.plazo))<date.today():
                    QtGui.QMessageBox.information(self,"Aviso","El tiempo permitido para el reintegro ha expirado")
                elif self.facturaSeleccionada.estaLiquidada(self.sesion):
                    QtGui.QMessageBox.information(self,"Aviso","La factura se encuentra liquidada a la Obra Social")
                elif self.facturaSeleccionada.getNC()!=None:
                    QtGui.QMessageBox.information(self,"Aviso","La factura ya posee una Nota de Crédito")
                else:
                    self.lineNumeroFac.setEnabled(False)
                    # Invoices without an organization show discounted details;
                    # otherwise the undiscounted details are listed.
                    if self.facturaSeleccionada.getObra() == None:
                        self.cargarObjetos(self.tableFactura,self.facturaSeleccionada.getDetalles(self.obraSocial, self.sesion),
                                           ["producto","cantidad","importe"])
                    else:
                        self.cargarObjetos(self.tableFactura,self.facturaSeleccionada.getDetallesSinDescuento(self.sesion),
                                           ["producto","cantidad","importe"])
    def agregarProducto(self):
        """
        Adds the double-clicked invoice line to the credit note, applying
        the organization's discount to compute the reimbursable amount.
        :return:
        """
        itemActual=self.tableFactura.currentItem()
        producto = int(self.tableFactura.item(itemActual.row(),0).text())
        descuento = DescuentoModel.buscar(DescuentoModel.obra_social,self.sesion,self.obraSocial).\
                    filter(DescuentoModel.producto==producto)[0].descuento
        cantidad = int(self.tableFactura.item(itemActual.row(), 1).text())
        importe = float(self.tableFactura.item(itemActual.row(), 2).text()) * descuento
        row = self.tableNC.rowCount()
        self.tableNC.insertRow(row)
        self.tableNC.setItem(row, 0, QtGui.QTableWidgetItem(str(producto)))
        self.tableNC.setItem(row, 1, QtGui.QTableWidgetItem(str(cantidad)))
        self.tableNC.setItem(row, 2, QtGui.QTableWidgetItem(str(importe)))
        self.detallesReintegrables.append([int(self.numeroFacturaActual),itemActual.row()+1,descuento,importe])
        self.detallesImprimibles.append([producto,cantidad,descuento,importe])
        self.tableFactura.hideRow(itemActual.row())
    def limpiarVentana(self):
        """
        Resets the window after the operation has finished.
        :return:
        """
        self.obraSocial = None
        self.facturaSeleccionada = None
        self.detallesReintegrables = []
        self.detallesImprimibles = []
        self.limpiarTabla(self.tableFactura)
        self.limpiarTabla(self.tableNC)
        self.lineCuit.clear()
        self.lineRazon.clear()
        self.lineNumeroFac.clear()
        self.lineCuit.setEnabled(True)
        self.lineRazon.setEnabled(True)
        self.tableOs.setEnabled(True)
        self.lineNumeroFac.setEnabled(True)
        self.gbFactura.setEnabled(False)
        self.gbNotaCredito.setEnabled(False)
    def confirmarOperacion(self):
        """
        Confirms the operation and persists the credit note and its
        detail lines, then prints the credit note.
        :return:
        """
        if self.tableNC.rowCount() == 0 :
            QtGui.QMessageBox.information(self,"Aviso",QtCore.QString.fromUtf8("No se han agregado productos a la Nota de Crédito"))
        else:
            ok = QtGui.QMessageBox.information(self,QtCore.QString.fromUtf8("Confirmación"),\
                                    QtCore.QString.fromUtf8("¿Desea generar la Nota Crédito?"),\
                                    QtGui.QMessageBox.Cancel, QtGui.QMessageBox.Accepted)
            if (ok==1):
                notaCredito = NotaCreditoModel(NotaCredito.generarNumero(self.sesion))
                notaCredito.guardar(self.sesion)
                for lineaNC, data in enumerate(self.detallesReintegrables):
                    detalleNC = DetalleNCModel(notaCredito.numero, lineaNC+1, data[0], data[1])
                    detalleNC.setImporte(data[3])
                    detalleNC.setDescuento(data[2])
                    detalleNC.guardar(self.sesion)
                QtGui.QMessageBox.information(self,"Aviso",QtCore.QString.fromUtf8("La Nota de Crédito ha sido generada con éxito"))
                self.facturaSeleccionada.setNC(notaCredito.numero)
                self.facturaSeleccionada.modificar(self.sesion)
                # Build the dictionary with the data needed to print the credit note.
                data = {}
                data["numero"] = notaCredito.numero
                data["fecha"] = notaCredito.fecha_emision
                data["detalles"] = self.detallesImprimibles
                generarNotaCredito(data)
                self.limpiarVentana()
            else:
                QtGui.QMessageBox.information(self,"Aviso",QtCore.QString.fromUtf8("La Nota de Crédito no ha sido generada"))
    def cancelarOperacion(self):
        """
        Cancels the in-progress operation (after confirmation) and
        clears the window.
        :return:
        """
        ok = QtGui.QMessageBox.information(self,"Confirmacion","¿Desea cancelar la operacion?",\
                                    QtGui.QMessageBox.Cancel, QtGui.QMessageBox.Accepted)
        if (ok==1):
            self.limpiarVentana()
    def cancelarVentana(self):
        # Unconditional reset used when the window itself is closed.
        self.limpiarVentana()
class VentaContado(CRUDWidget, Ui_vtnVentaContado):
"""
Clase encargada de modelar el comportamiento de Venta al Contado
"""
    def __init__(self,mdi):
        """
        Constructor for the VentaContado window: wires the widget
        signals, loads the initial product data and resets the
        per-sale state.
        :param mdi:
        :return:
        """
        MdiWidget.__init__(self, mdi)
        self.sesion = self.mdi().window().getSesionBD()
        self.validadores()
        self.cargar_obras()
        self.lineMedicamento.returnPressed.connect(self.buscarProd)
        self.lineMonodroga.returnPressed.connect(self.buscarProd)
        self.lineCuit.returnPressed.connect(self.buscarObra)
        self.lineObra.returnPressed.connect(self.buscarObra)
        self.tableObra.itemDoubleClicked.connect(self.cargarObra)
        self.tableProductos.itemDoubleClicked.connect(self.agregarProducto)
        self.btnBuscar.pressed.connect(self.limpiarObra)
        self.btnAceptar.pressed.connect(self.confirmarOperacion)
        self.btnCancelar.pressed.connect(self.cancelarOperacion)
        self.btnEliminar.pressed.connect(self.eliminarDetalle)
        self.rbtnObra.pressed.connect(self.habilitarObras)
        # Social-security widgets start disabled/hidden until the user
        # opts in via the radio button.
        self.btnBuscar.setEnabled(False)
        self.tableObra.setVisible(False)
        self.lineCuit.setEnabled(False)
        self.lineObra.setEnabled(False)
        self.cargarProductosSinObra()
        self.productosAgregados=0  # number of product lines added to the sale
        self.lotesVentas={}
        self.facturaCobrada=False
        self.obraSocialSeleccionada=None  # selected organization, or None for plain sale
        self.formapago = None
        self.factura = None
        self.data = {}
        self.detallesTabla = {}
def buscarProd(self):
"""
Filtra la tabla de Productos de acuerdo
a los criterios de busqueda impuestos
:return:
"""
medicamento = str(self.lineMedicamento.text())
monodroga = str(self.lineMonodroga.text())
data = self.getAllTabla(self.tableProductos)
if medicamento != "":
dataMedic = filter(lambda x: x[1].upper() == medicamento.upper(), data.values())
else:
dataMedic = data.values()
if monodroga != "":
dataMono = filter(lambda x: x[3].upper() == monodroga.upper(), dataMedic)
else:
dataMono = dataMedic
for dato in data:
self.tableProductos.setRowHidden(dato,False)
for dato in data:
if not data[dato] in dataMono:
self.tableProductos.setRowHidden(dato,True)
def buscarObra(self):
"""
Filtra la tabla de Obras Sociales de acuerdo
a los criterios de busqueda impuestos
:return:
"""
razon_social = str(self.lineObra.text())
cuit = str(self.lineCuit.text())
data = self.getAllTabla(self.tableObra)
if razon_social != "":
dataRazon = filter(lambda x: x[0].upper() == razon_social.upper(), data.values())
else:
dataRazon = data.values()
if cuit != "":
dataCuit = filter(lambda x: x[1].upper() == cuit.upper(), dataRazon)
else:
dataCuit = dataRazon
for dato in data:
self.tableObra.setRowHidden(dato,False)
for dato in data:
if not data[dato] in dataCuit:
self.tableObra.setRowHidden(dato,True)
def actualizar(self):
"""
Actualiza la informacion de la
tabla de Productos
:return:
"""
if self.obraSocialSeleccionada!=None:
self.cargar_productos(self.obraSocialSeleccionada)
else:
self.cargarProductosSinObra()
def habilitarObras(self):
"""
Muestra las Obras Sociales si no hay una factura creada.
Si la factura ya se encuentra creada, notifica que no
es posible cambiar la Obra Social actual.
:return:
"""
if self.productosAgregados != 0:
QtGui.QMessageBox.information(self,"Aviso","Ya se han agregado productos a la factura")
else:
if not self.rbtnObra.isChecked():
self.btnBuscar.setEnabled(True)
self.lineObra.setEnabled(True)
self.lineCuit.setEnabled(True)
self.tableObra.setVisible(True)
else:
self.lineObra.clear()
self.lineCuit.clear()
self.btnBuscar.setEnabled(False)
self.lineObra.setEnabled(False)
self.lineCuit.setEnabled(False)
self.tableObra.setVisible(False)
self.obraSocialSeleccionada=None
self.cargarProductosSinObra()
    def cargarProductosSinObra(self):
        """
        Loads every active product into the Productos table with no
        Obra Social discount (the discount column is fixed at 0).
        :return:
        """
        self.limpiarTabla(self.tableProductos)
        ## Query that fetches every non-deleted product together with its
        ## barcode, medicine, presentation, monodrug and price.
        query=self.sesion.query(ProductoModel.codigo_barra,ProductoModel.id_medicamento,ProductoModel.id_presentacion,MonodrogaModel.nombre,ProductoModel.importe).\
            join(MedicamentoModel).filter(ProductoModel.id_medicamento==MedicamentoModel.nombre_comercial).\
            join(MonodrogaModel).filter(MedicamentoModel.id_monodroga==MonodrogaModel.nombre).\
            filter(ProductoModel.baja==False).order_by(ProductoModel.codigo_barra)
        ## Fill the table; column 4 (discount) is hard-coded to 0 and
        ## column 5 holds the full price.
        for n, obj in enumerate(query):
            self.tableProductos.insertRow(n)
            self.tableProductos.setItem(n, 0, QtGui.QTableWidgetItem(str(obj[0])))
            self.tableProductos.setItem(n, 1, QtGui.QTableWidgetItem(str(obj[1])))
            self.tableProductos.setItem(n, 2, QtGui.QTableWidgetItem(str(obj[2])))
            self.tableProductos.setItem(n, 3, QtGui.QTableWidgetItem(str(obj[3])))
            self.tableProductos.setItem(n, 4, QtGui.QTableWidgetItem(str(0)))
            self.tableProductos.setItem(n, 5, QtGui.QTableWidgetItem(str(obj[4])))
        ## Column 6 gets the current stock of each product.
        ## NOTE(review): assumes buscarTodos() yields products in the same
        ## codigo_barra order as the query above — confirm.
        for row,producto in enumerate(ProductoModel.buscarTodos(ProductoModel.codigo_barra,self.sesion)):
            self.tableProductos.setItem(row,6,QtGui.QTableWidgetItem(str(producto.getCantidad(self.sesion))))
    def cargar_productos(self, obraSocial):
        """
        Loads every active product into the Productos table with the
        discount that the given Obra Social grants for it.
        :param obraSocial: name/key of the selected Obra Social
        :return:
        """
        self.limpiarTabla(self.tableProductos)
        # Same query as cargarProductosSinObra but joined against the
        # Descuento table, restricted to this Obra Social.
        query=self.sesion.query(ProductoModel.codigo_barra,ProductoModel.id_medicamento,ProductoModel.id_presentacion,MonodrogaModel.nombre,DescuentoModel.descuento,ProductoModel.importe).\
            join(MedicamentoModel).filter(ProductoModel.id_medicamento==MedicamentoModel.nombre_comercial).\
            join(MonodrogaModel).filter(MedicamentoModel.id_monodroga==MonodrogaModel.nombre).\
            join(DescuentoModel).filter(DescuentoModel.producto==ProductoModel.codigo_barra).\
            filter(DescuentoModel.obra_social==obraSocial,ProductoModel.baja==False).order_by(ProductoModel.codigo_barra)
        # Here the query row already matches the table columns 0..5.
        for n, obj in enumerate(query):
            self.tableProductos.insertRow(n)
            for m, campo in enumerate(obj):
                self.tableProductos.setItem(n, m, QtGui.QTableWidgetItem(str(campo)))
        # Column 6 gets the current stock of each product.
        for row,producto in enumerate(ProductoModel.buscarTodos(ProductoModel.codigo_barra,self.sesion)):
            self.tableProductos.setItem(row,6,QtGui.QTableWidgetItem(str(producto.getCantidad(self.sesion))))
def cargarObra(self):
"""
Carga la informacion de la Obra Social
seleccionada por el usuario
:return:
"""
rowActual=self.tableObra.currentItem().row()
self.lineObra.setText(str(self.tableObra.item(rowActual,0).text()))
self.lineCuit.setText(str(self.tableObra.item(rowActual,1).text()))
self.tableObra.hide()
self.lineObra.setEnabled(False)
self.lineCuit.setEnabled(False)
self.obraSocialSeleccionada = str(self.lineObra.text())
self.cargar_productos(self.obraSocialSeleccionada)
self.gbProducto.setVisible(True)
def limpiarObra(self):
"""
Permite buscar las obras sociales si aun
no hay ninguna seleccionada.
Limpia los campos correspondientes a las
Obras Sociales, si ya hay una cargada.
:return:
"""
if self.lineObra.isEnabled():
self.buscarObra()
else:
self.lineCuit.clear()
self.lineObra.clear()
self.lineCuit.setEnabled(True)
self.lineObra.setEnabled(True)
self.tableObra.setVisible(True)
def validadores(self):
camposRequeridos = [getattr(self,"lineMonodroga")]
ValidarDatos.setValidador(camposRequeridos)
camposRequeridos = [getattr(self,"lineMedicamento")]
ValidarDatos.setValidador(camposRequeridos)
def cargar_obras(self):
"""
Carga todos las obras Sociales en el sistema
en la tabla de Obras Sociales
:return:
"""
self.cargarObjetos(self.tableObra,
ObraSocialModel.buscarTodos("razon_social", self.sesion).all(),
("razon_social", "cuit", "direccion")
)
    def descontarCantidad(self,detalle,producto,cantidad):
        """
        Deducts `cantidad` units of `producto` from stock, draining
        its lots in the order returned by obtenerLoteProducto, and
        records which (lot, amount) pairs were consumed so the
        operation can later be undone.
        :param detalle: invoice detail this deduction belongs to
        :param producto: product barcode
        :param cantidad: quantity to deduct (caller has already
            checked it does not exceed total stock)
        :return:
        """
        query=LoteModel.obtenerLoteProducto(producto,self.sesion)
        valores=[]
        for a in query:
            loteProducto=LoteProductoModel.buscarLoteProducto(self.sesion,producto,a.codigo).first()
            if cantidad<=loteProducto.cantidad:
                # This lot covers the remainder: deduct and stop.
                loteProducto.descontarCantidad(cantidad)
                loteProducto.modificar(self.sesion)
                valores.append([loteProducto,cantidad])
                break
            else:
                # Drain this lot completely and carry the rest over.
                cantidad-=loteProducto.cantidad
                valores.append([loteProducto,loteProducto.cantidad])
                loteProducto.descontarCantidad(loteProducto.cantidad)
                loteProducto.modificar(self.sesion)
        # Remember the touched lots, both in memory (for rollback in
        # eliminarDetalle/cancelar*) and persisted on the detail.
        self.lotesVentas[detalle]=valores
        detalle.agregarLotes(self.sesion,self.lotesVentas[detalle])
    def agregarProducto(self):
        """
        Adds the product selected in the Productos table to the
        invoice: asks for a quantity, validates it against stock,
        lazily creates the Factura on the first product, deducts
        stock and appends a DetalleFactura row to the invoice table.
        :return:
        """
        itemActual=self.tableProductos.currentItem()
        cantidad, ok = QtGui.QInputDialog.getInt(self,"Cantidad","Ingrese cantidad del producto",1,1,2000,5)
        if not ok:
            self.showMsjEstado("No se ha seleccionado cantidad del producto")
        else:
            # Column 6 of the product table holds the current stock.
            cantidadProducto=int(self.tableProductos.item(itemActual.row(),6).text())
            if cantidad>cantidadProducto:
                QtGui.QMessageBox.information(self,"Aviso","La cantidad ingresada es mayor que la del stock")
            else:
                # The invoice is created lazily on the first product.
                if self.productosAgregados == 0 and self.factura == None:
                    self.factura=FacturaModel(FacturaModel.generarNumero(self.sesion))
                    self.factura.guardar(self.sesion)
                self.productosAgregados+=1
                rowItemActual=itemActual.row()
                rows=self.tableFactura.rowCount()
                self.tableFactura.insertRow(rows)
                # Read price (col 5) and discount (col 4) off the table.
                # NOTE(review): assumes the discount is a fraction in
                # [0, 1] — confirm against DescuentoModel.
                producto = int(self.tableProductos.item(rowItemActual,0).text())
                importeActual=float(self.tableProductos.item(rowItemActual,5).text())
                descuentoActual=float(self.tableProductos.item(rowItemActual,4).text())
                subtotal=importeActual*(1-descuentoActual)
                detalleFactura=DetalleFacturaModel(self.factura.numero,producto,cantidad,
                                                subtotal*cantidad,descuentoActual,self.productosAgregados
                )
                # Deduct stock before persisting the detail row.
                self.descontarCantidad(detalleFactura,producto,cantidad)
                self.tableFactura.setItem(rows,0,QtGui.QTableWidgetItem(str(detalleFactura.producto)))
                self.tableFactura.setItem(rows,1,QtGui.QTableWidgetItem(str(detalleFactura.cantidad)))
                self.tableFactura.setItem(rows, 2, QtGui.QTableWidgetItem(str("%.2f"%(subtotal*cantidad))))
                detalleFactura.guardar(self.sesion)
                # Bookkeeping keyed by the invoice-table row index.
                self.detallesTabla[rows] = detalleFactura
                self.data[rows] = [
                    producto, cantidad, subtotal*cantidad, descuentoActual
                ]
                self.actualizar()
                self.objectModified.emit()
    def eliminarDetalle(self):
        """
        Removes the invoice detail selected by the user: restores the
        stock of the lots it consumed, deletes the detail from the
        database, and hides its row in the invoice table.
        :return:
        """
        itemActual = self.tableFactura.currentItem()
        if itemActual == None:
            self.showMsjEstado("Debe seleccionar un item para dar de baja")
        else:
            detalle = self.detallesTabla[itemActual.row()]
            # Give every consumed (lot, amount) pair its stock back.
            for loteVenta in self.lotesVentas[detalle]:
                loteVenta[0].aumentarCantidad(loteVenta[1])
                loteVenta[0].modificar(self.sesion)
            detalle.eliminarLotesAsociados(self.sesion)
            detalle.bajaFisica(self.sesion)
            del self.lotesVentas[detalle]
            del self.data[itemActual.row()]
            # The table row is only hidden, not removed, so the other
            # rows keep their indices (which key detallesTabla/data).
            self.tableFactura.hideRow(itemActual.row())
            self.actualizar()
            self.productosAgregados -=1
            self.objectModified.emit()
def limpiarVentana(self):
"""
Limpia la ventana actual
:return:
"""
self.productosAgregados=0
self.lotesVentas={}
self.facturaCobrada=False
self.obraSocialSeleccionada=None
self.formapago = None
self.factura = None
self.data = {}
self.detallesTabla = {}
self.lineObra.clear()
self.lineObra.setEnabled(True)
self.lineCuit.clear()
self.lineCuit.setEnabled(True)
self.tableObra.setVisible(False)
self.rbtnObra.setChecked(False)
self.limpiarTabla(self.tableProductos)
self.limpiarTabla(self.tableFactura)
self.cargarProductosSinObra()
def calcularTotal(self):
"""
Calcula el total a pagar
:return Total a Pagar:
"""
subtotales=[]
for row in range(0,self.tableFactura.rowCount()):
subtotales.append(float(self.tableFactura.item(row,2).text()))
importeTotal=sum(subtotales)
return importeTotal
    def confirmarOperacion(self):
        """
        Opens the Cobrar dialog to charge the invoice. On success it
        generates the printable invoice, attaches the Obra Social to
        the Factura and resets the window; otherwise it notifies that
        the invoice has not been paid or that it is empty.
        :return:
        """
        if self.productosAgregados == 0:
            QtGui.QMessageBox.information(self,"Aviso","No se ha agregado ningun producto")
        else:
            # The dialog sets self.facturaCobrada / self.formapago on
            # the parent (this window) when payment completes.
            ventana = Cobrar(self,self.calcularTotal(),self.factura,self.sesion)
            ventana.exec_()
            if self.facturaCobrada:
                QtGui.QMessageBox.information(self,"Venta","La venta se ha realizado con exito")
                data = {}
                data["numero"] = self.factura.numero
                data["fecha"] = self.factura.fecha_emision
                data["detalles"] = self.data.values()
                data["formaPago"] = self.formapago
                generarFactura(data)
                self.factura.setObra(self.obraSocialSeleccionada)
                self.factura.modificar(self.sesion)
                self.limpiarVentana()
            else:
                QtGui.QMessageBox.information(self,"Aviso","La factura aun no ha sido cobrada")
    def cancelarOperacion(self):
        """
        Asks for confirmation and, if accepted, cancels the current
        sale: voids the invoice, restores the stock of every lot
        consumed by its details, deletes the details and resets the
        window.
        :return:
        """
        ok=QtGui.QMessageBox.warning(self,"Aviso","¿Desea cancelar la operación?",\
                                     QtGui.QMessageBox.Cancel | QtGui.QMessageBox.Ok)
        if ok == QtGui.QMessageBox.Ok:
            if self.factura != None:
                self.factura.anular()
                # Roll back the stock deductions recorded by
                # descontarCantidad for each detail.
                for detalle in self.lotesVentas:
                    for loteVenta in self.lotesVentas[detalle]:
                        loteVenta[0].aumentarCantidad(loteVenta[1])
                        loteVenta[0].modificar(self.sesion)
                    detalle.eliminarLotesAsociados(self.sesion)
                    detalle.borrar(self.sesion)
                self.objectModified.emit()
            self.limpiarVentana()
    def cancelarVentana(self):
        """
        Cancels the current sale without asking for confirmation
        (same rollback as cancelarOperacion): voids the invoice,
        restores stock, deletes the details and resets the window.
        :return:
        """
        if self.factura != None:
            self.factura.anular()
            for detalle in self.lotesVentas:
                for loteVenta in self.lotesVentas[detalle]:
                    loteVenta[0].aumentarCantidad(loteVenta[1])
                    loteVenta[0].modificar(self.sesion)
                detalle.eliminarLotesAsociados(self.sesion)
                detalle.borrar(self.sesion)
            self.objectModified.emit()
        self.limpiarVentana()
def addHandlerSignal(self):
self.sender = PoolOfWindows.getVentana("VentaConRemito")
self.sender.objectModified.connect(self.actualizar)
self.sender1 = PoolOfWindows.getVentana("AltaProducto")
self.sender1.objectCreated.connect(self.actualizar)
self.sender2 = PoolOfWindows.getVentana("BajaProducto")
self.sender2.objectDeleted.connect(self.actualizar)
self.sender3 = PoolOfWindows.getVentana("ModificarProducto")
self.sender3.objectModified.connect(self.actualizar)
self.sender4 = PoolOfWindows.getVentana("DevolucionDeCliente")
self.sender4.objectModified.connect(self.actualizar)
self.sender5 = PoolOfWindows.getVentana("ModificarRemito")
self.sender5.objectModified.connect(self.actualizar)
self.sender6 = PoolOfWindows.getVentana("BajaRemito")
self.sender6.objectModified.connect(self.actualizar)
self.sender7 = PoolOfWindows.getVentana("FraccionarProducto")
self.sender7.objectModified.connect(self.actualizar)
self.sender8 = PoolOfWindows.getVentana("AltaLote")
self.sender8.objectCreated.connect(self.actualizar)
self.sender9 = PoolOfWindows.getVentana("ModificarLote")
self.sender9.objectModified.connect(self.actualizar)
class Cobrar(QtGui.QDialog, Ui_Dialog):
    """
    Dialog that models the payment (cobro) of an invoice.

    The remaining balance lives in ``total_a_pagar`` and each partial
    payment is stored in ``detalles_cobro``, a dict keyed by the row
    the payment occupies in ``tablePagos``, with values
    ``[method, amount]`` or ``[method, amount, nc_number]`` for
    credit-note payments.
    """
    def __init__(self,ventana_padre, total, factura,sesion):
        """
        Constructor.
        :param ventana_padre: reference to the parent window
        :param total: total amount to pay
        :param factura: invoice being charged
        :param sesion: database session
        :return:
        """
        QtGui.QDialog.__init__(self,ventana_padre)
        self.setupUi(self)
        self.btnAceptar.pressed.connect(self.confirmar)
        self.btnCancelar.pressed.connect(self.cancelar)
        self.btnEliminar.pressed.connect(self.eliminar)
        self.rbtnEfectivo.pressed.connect(self.cobroEfectivo)
        self.rbtnNC.pressed.connect(self.cobroNC)
        self.rbtnTC.pressed.connect(self.cobroTC)
        self.rbtnTD.pressed.connect(self.cobroTD)
        self.total_a_pagar = total
        self.padre = ventana_padre
        self.factura = factura
        self.sesion = sesion
        self.actualizar_total()
        # row in tablePagos -> [method, amount(, nc_number)]
        self.detalles_cobro = {}

    def actualizar_total(self):
        """
        Refreshes the remaining-balance label in the dialog.
        :return:
        """
        self.lblImporte.setText("Saldo Restante: $%.2f" % self.total_a_pagar)

    def cobroNC(self):
        """
        Pays the whole remaining balance with a credit note (NC).
        Rejects non-existent notes and notes without enough unused
        balance.
        :return:
        """
        if self.total_a_pagar == 0:
            QtGui.QMessageBox.information(self,"Aviso","El saldo restante a pagar es cero")
        else:
            self.rbtnNC.setChecked(True)
            totalFactura = self.total_a_pagar
            numero,ok = QtGui.QInputDialog.getText(self,"Cobro c/Nota de Crédito","Ingrese número de Nota de Crédito")
            if ok:
                notaCredito = NotaCreditoModel.getNotaCredito(self.padre.sesion,int(numero))
                if notaCredito == None:
                    QtGui.QMessageBox.information(self,"Aviso","La Nota de Crédito ingresada no existe")
                elif notaCredito.getTotal(self.padre.sesion) < totalFactura:
                    QtGui.QMessageBox.information(self,"Aviso","El monto de la Nota de Credito es insuficiente")
                elif notaCredito.getTotal(self.padre.sesion) - CobroClienteModel.getTotalNC(self.padre.sesion,notaCredito.numero) < totalFactura:
                    # The note exists but part of it was already spent.
                    dif = notaCredito.getTotal(self.padre.sesion) - CobroClienteModel.getTotalNC(self.padre.sesion,notaCredito.numero)
                    QtGui.QMessageBox.information(self,"Aviso","La Nota solo posee $" + str(dif))
                else:
                    # NC always covers the full remaining balance.
                    temp = ["Nota de Crédito",self.total_a_pagar,notaCredito.numero]
                    self.detalles_cobro[self.tablePagos.rowCount()] = temp
                    self.total_a_pagar = 0
                    self.actualizar_total()
                    self.actualizar_tabla()

    def cobroTC(self):
        """
        Registers a partial or full payment with a credit card.
        :return:
        """
        if self.total_a_pagar == 0:
            QtGui.QMessageBox.information(self,"Aviso","El saldo restante a pagar es cero")
        else:
            monto_a_pagar, ok = QtGui.QInputDialog.getDouble(self,"Cobro Tarjeta Crédito","Ingrese monto a pagar",0,0,2000,2)
            if ok:
                if monto_a_pagar > self.total_a_pagar:
                    QtGui.QMessageBox.information(self,"Aviso","El monto ingresado es mayor al total a pagar")
                elif monto_a_pagar == 0:
                    QtGui.QMessageBox.information(self,"Aviso","El monto ingresado no puede ser cero")
                else:
                    temp = ["Tarjeta de Crédito",monto_a_pagar]
                    self.detalles_cobro[self.tablePagos.rowCount()] = temp
                    self.total_a_pagar -= monto_a_pagar
                    self.actualizar_total()
                    self.actualizar_tabla()

    def cobroTD(self):
        """
        Registers a partial or full payment with a debit card.
        :return:
        """
        if self.total_a_pagar == 0:
            QtGui.QMessageBox.information(self,"Aviso","El saldo restante a pagar es cero")
        else:
            monto_a_pagar, ok = QtGui.QInputDialog.getDouble(self,"Cobro Tarjeta Débito","Ingrese monto a pagar",0,0,2000,2)
            if ok:
                if monto_a_pagar > self.total_a_pagar:
                    QtGui.QMessageBox.information(self,"Aviso","El monto ingresado es mayor al total a pagar")
                elif monto_a_pagar == 0:
                    QtGui.QMessageBox.information(self,"Aviso","El monto ingresado no puede ser cero")
                else:
                    temp = ["Tarjeta de Débito",monto_a_pagar]
                    self.detalles_cobro[self.tablePagos.rowCount()] = temp
                    self.total_a_pagar -= monto_a_pagar
                    self.actualizar_total()
                    self.actualizar_tabla()

    def cobroEfectivo(self):
        """
        Registers a cash payment. Unlike the cards, cash may exceed
        the balance: the change (vuelto) is shown and the full cash
        amount handed over is recorded.
        :return:
        """
        if self.total_a_pagar == 0:
            QtGui.QMessageBox.information(self,"Aviso","El saldo restante a pagar es cero")
        else:
            self.rbtnEfectivo.setChecked(True)
            monto_a_pagar, ok = QtGui.QInputDialog.getDouble(self,"Cobro Efectivo","Ingrese monto a pagar",0,0,2000,2)
            if ok:
                if monto_a_pagar >= self.total_a_pagar:
                    # Overpayment: show the change and zero the balance.
                    QtGui.QMessageBox.information(self,"Cobro Efectivo","Su vuelto es:%.2f" % (monto_a_pagar - self.total_a_pagar))
                    temp = ["Efectivo",monto_a_pagar]
                    self.detalles_cobro[self.tablePagos.rowCount()] = temp
                    self.total_a_pagar = 0
                elif monto_a_pagar == 0:
                    QtGui.QMessageBox.information(self,"Aviso","El monto ingresado no puede ser cero")
                else:
                    temp = ["Efectivo",monto_a_pagar]
                    self.detalles_cobro[self.tablePagos.rowCount()] = temp
                    self.total_a_pagar -= monto_a_pagar
                self.actualizar_total()
                self.actualizar_tabla()

    def eliminar(self):
        """
        Removes the selected payment from the list and adds its
        amount back to the remaining balance.
        :return:
        """
        itemActual = self.tablePagos.currentItem()
        if itemActual == None:
            # NOTE(review): showMsjEstado is not defined on Cobrar in
            # this file — presumably provided by Ui_Dialog or never hit;
            # confirm, otherwise this branch raises AttributeError.
            self.showMsjEstado("Debe seleccionar un para poder eliminar")
        else:
            monto = self.detalles_cobro[itemActual.row()][1]
            del self.detalles_cobro[itemActual.row()]
            self.total_a_pagar += monto
            # The row is hidden, not deleted, so remaining dict keys
            # still match their table rows.
            self.tablePagos.setRowHidden(itemActual.row(),True)
            self.actualizar_total()

    def actualizar_tabla(self):
        """
        Rebuilds the payments table from detalles_cobro.
        :return:
        """
        self.padre.limpiarTabla(self.tablePagos)
        for row, cobro in enumerate(self.detalles_cobro.values()):
            self.tablePagos.insertRow(row)
            self.tablePagos.setItem(row,0,QtGui.QTableWidgetItem(cobro[0]))
            self.tablePagos.setItem(row,1,QtGui.QTableWidgetItem("$"+str(cobro[1])))

    def confirmar(self):
        """
        Persists every recorded payment once the balance reaches
        zero, sets formapago/facturaCobrada on the parent window and
        accepts the dialog; otherwise reports the remaining balance.
        :return:
        """
        if self.total_a_pagar == 0:
            for cobro in self.detalles_cobro.values():
                # A 3-element entry is a credit-note payment and also
                # stores the NC number on the CobroCliente record.
                if len(cobro) == 3:
                    cobroCliente = CobroClienteModel(CobroClienteModel.obtenerNumero(self.sesion),self.factura.numero,\
                                                     cobro[0],cobro[1])
                    cobroCliente.setNC(cobro[2])
                else:
                    cobroCliente = CobroClienteModel(CobroClienteModel.obtenerNumero(self.sesion),self.factura.numero,\
                                                     cobro[0],cobro[1])
                cobroCliente.guardar(self.sesion)
            # NOTE(review): .values()[0] relies on Python 2 dict.values()
            # returning a list — not valid on Python 3.
            if len(self.detalles_cobro.values())>1:
                self.padre.formapago = "Varios"
            else:
                self.padre.formapago = self.detalles_cobro.values()[0][0]
            self.padre.facturaCobrada = True
            self.accept()
        else:
            QtGui.QMessageBox.information(self,"Aviso","Restan $%.2f por pagar" % self.total_a_pagar)

    def cancelar(self):
        """
        Asks for confirmation and, if accepted, discards every
        recorded payment and closes the dialog without charging.
        :return:
        """
        signal = QtGui.QMessageBox.information(self,"Aviso","¿Desea cancelar la operacion?",\
                                               QtGui.QMessageBox.Close | QtGui.QMessageBox.Ok)
        if signal == QtGui.QMessageBox.Ok:
            self.detalles_cobro = {}
            self.padre.limpiarTabla(self.tablePagos)
            self.close()
|
normal
|
{
"blob_id": "59233cd45000cd6d6ad0876eb3812599392d7c05",
"index": 9357,
"step-1": "# -*- coding:utf-8 -*-\n__author__ = 'leandro'\n\n\nfrom datetime import *\n\nfrom PyQt4 import QtGui, QtCore\n\nfrom baseDatos.ventas.venta import NotaCredito\nfrom gui import CRUDWidget,MdiWidget\nfrom ventanas import Ui_vtnDevolucionDeCliente, Ui_vtnReintegroCliente, Ui_vtnVentaContado\nfrom baseDatos.obraSocial import ObraSocial as ObraSocialModel\nfrom baseDatos.productos import Producto as ProductoModel\nfrom baseDatos.productos import Medicamento as MedicamentoModel\nfrom baseDatos.productos import Monodroga as MonodrogaModel\nfrom baseDatos.obraSocial import Descuento as DescuentoModel\nfrom baseDatos.productos import Lote as LoteModel\nfrom baseDatos.productos import LoteProducto as LoteProductoModel\nfrom baseDatos.ventas import Factura as FacturaModel\nfrom baseDatos.ventas import DetalleFactura as DetalleFacturaModel\nfrom baseDatos.ventas import NotaCredito as NotaCreditoModel\nfrom baseDatos.ventas import DetalleNotaCredito as DetalleNCModel\nfrom baseDatos.ventas import CobroCliente as CobroClienteModel\nfrom genComprobantes import generarNotaCredito,generarFactura\nfrom validarDatos import ValidarDatos\nfrom ventanas import Ui_Dialog\nfrom gui.signals import PoolOfWindows\n\nclass DevolucionDeCliente(CRUDWidget, Ui_vtnDevolucionDeCliente):\n\n \"\"\"\n Clase encargada de modelar la funcionalidad de Devolucion al Cliente\n\n \"\"\"\n\n plazo = 7\n\n def __init__(self,mdi):\n MdiWidget.__init__(self, mdi)\n self.sesion = self.mdi().window().getSesionBD()\n self.validadores()\n self.btnBuscar.pressed.connect(self.buscarFactura)\n self.tableFactura.doubleClicked.connect(self.devolverDetalle)\n self.btnAceptar.pressed.connect(self.confirmarOperacion)\n self.btnCancelar.pressed.connect(self.cancelarOperacion)\n self.lineNumero.returnPressed.connect(self.buscarFactura)\n self.facturaSeleccionada = None\n self.notaCredito = None\n self.productosSeleccionados = 0\n self.detallesDevueltos = {}\n self.lotesDevueltos = {}\n self.data = {}\n\n def 
validadores(self):\n camposRequeridos = [getattr(self,\"lineNumero\")]\n ValidarDatos.setValidador(camposRequeridos)\n\n def buscarFactura(self):\n \"\"\"\n Busca y carga los detalles correspondientes\n al Nro de Factura ingresado.\n :return:\n \"\"\"\n\n if not self.lineNumero.isEnabled() and self.facturaSeleccionada != None:\n QtGui.QMessageBox.information(self,\"Aviso\",\"Ya se ha seleccionado una factura\")\n elif not self.lineNumero.isEnabled():\n self.lineNumero.setEnabled(True)\n self.lineNumero.clear()\n self.limpiarTabla(self.tableFactura)\n else:\n self.numeroFacturaActual=str(self.lineNumero.text())\n if len(self.numeroFacturaActual)==0:\n QtGui.QMessageBox.information(self,\"Aviso\",QtCore.QString.fromUtf8(\"No se ha ingresado número de factura\"))\n else:\n self.facturaSeleccionada=FacturaModel.existeFactura(int(self.numeroFacturaActual),self.sesion)\n if self.facturaSeleccionada==None:\n QtGui.QMessageBox.warning(self,\"Aviso\",\"La factura seleccionada no existe\")\n elif self.facturaSeleccionada.getNC()!=None:\n QtGui.QMessageBox.information(self,\"Aviso\",QtCore.QString.fromUtf8(\"La factura ya ha posee una Nota de Crédito\"))\n self.facturaSeleccionada = None\n elif self.facturaSeleccionada.getFechaEmision()+timedelta(days=int(self.plazo))<date.today():\n QtGui.QMessageBox.information(self,\"Aviso\",QtCore.QString.fromUtf8(\"El tiempo permitido para la devolución ha expirado\"))\n elif self.facturaSeleccionada.estaLiquidada(self.sesion):\n print self.facturaSeleccionada.estaLiquidada(self.sesion)\n QtGui.QMessageBox.information(self,\"Aviso\",\"La factura se encuentra liquidada a la Obra Social\")\n else:\n self.lineNumero.setEnabled(False)\n self.cargarObjetos(self.tableFactura,self.facturaSeleccionada.getDetalles(self.sesion),\n [\"nro_linea\",\"producto\",\"cantidad\",\"importe\"])\n\n def obtenerValoresItem(self,row):\n \"\"\"\n Obtiene los valores de una fila de\n la Tabla de Detalles de Factura\n :param row Numero de Fila:\n :return Arreglo 
con valores de la fila:\n \"\"\"\n values=[]\n for col in range(0,self.tableFactura.columnCount()):\n values.append(self.tableFactura.item(row,col).text())\n return values\n\n def armarItem(self,item,cantidad,key):\n \"\"\"\n Genera y guarda el Detalle de la Nota de Credito\n correspondiente a una devolucion\n :param item Arreglo con informacion del Detalle de Factura:\n :param cantidad Cantidad Devuelta:\n :param key Clave del detalle de factura devuelto:\n :return:\n \"\"\"\n row=self.tableNC.rowCount()\n self.tableNC.insertRow(row)\n for col, elemento in enumerate(item[1:]):\n self.tableNC.setItem(row,col,QtGui.QTableWidgetItem(item[col+1]))\n self.tableNC.item(row,1).setText(str(cantidad))\n #Arreglo que contiene informacion del item agregado\n self.data[key] = [str(item[1]),cantidad,0,float(item[3])]\n\n def devolverDetalle(self):\n \"\"\"\n Incorpora el Detalle de Factura seleccionado\n por el usuario a la Nota de Credito\n :return:\n \"\"\"\n\n rowActual=self.tableFactura.currentItem().row()\n signal = QtGui.QMessageBox.information(self,\"Confirmación\",\"¿Desea devolver este item?\",\\\n QtGui.QMessageBox.Close | QtGui.QMessageBox.Ok)\n\n if signal == QtGui.QMessageBox.Ok:\n\n producto = int(self.tableFactura.item(rowActual,1).text())\n cantidad_detalle = int(self.tableFactura.item(rowActual,2).text())\n linea = int(self.tableFactura.item(rowActual,0).text())\n nro_factura = int(self.lineNumero.text())\n detalle = FacturaModel.getDetalle(nro_factura,linea,self.sesion)\n lotes_detalle = detalle.devolverLotes(self.sesion)\n temp = lotes_detalle\n\n finalize_actualizacion = False\n cantidad_restante = cantidad_detalle\n\n while not finalize_actualizacion:\n\n cantidad, ok = QtGui.QInputDialog.getInt(self,\"Cantidad\",\"Ingrese cantidad del producto\",1,1,2000,5)\n if ok == False:\n finalize_actualizacion = True\n self.tableFactura.item(rowActual,2).setText(str(cantidad_detalle))\n break\n lote, ok=QtGui.QInputDialog.getText(self,\"Lote\",\"Ingrese lote\")\n if 
ok == False:\n finalize_actualizacion = True\n self.tableFactura.item(rowActual,2).setText(str(cantidad_detalle))\n break\n if not lote in lotes_detalle.keys():\n QtGui.QMessageBox.information(self,\"Aviso\",\"El lote ingresado no es valido para este detalle\")\n elif lotes_detalle[str(lote)] == 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"Los productos de este lote ya han sido devueltos\")\n elif cantidad > lotes_detalle[str(lote)]:\n QtGui.QMessageBox.information(self,\"Aviso\",\"La cantidad ingresada es mayor a la esperada para este lote\")\n else:\n temp[str(lote)] -= cantidad\n cantidad_restante -= cantidad\n self.tableFactura.item(rowActual,2).setText(str(cantidad_restante))\n\n if sum(map(lambda x: temp[x],temp)) == 0:\n self.productosSeleccionados +=1\n key = int(self.tableFactura.item(rowActual,0).text())\n self.detallesDevueltos[key] = detalle\n self.armarItem(self.obtenerValoresItem(rowActual),cantidad_detalle,key)\n self.tableFactura.removeRow(rowActual)\n finalize_actualizacion = True\n\n def limpiarVentana(self):\n \"\"\"\n Limpia los componentes de la ventana\n :return:\n \"\"\"\n self.limpiarTabla(self.tableFactura)\n self.lineNumero.setEnabled(True)\n self.lineNumero.clear()\n self.limpiarTabla(self.tableNC)\n\n def calcularTotal(self):\n \"\"\"\n Calculo el total a devolver en la\n Nota de Credito\n :return Total a Devolver:\n \"\"\"\n subtotales=[]\n for row in range(0,self.tableNC.rowCount()):\n subtotales.append(float(self.tableNC.item(row,2).text()))\n return sum(subtotales)\n\n def confirmarOperacion(self):\n \"\"\"\n Imprime la Nota de Credito, una vez que el\n usuario confirmo la operacion.\n :return:\n \"\"\"\n\n if self.productosSeleccionados != 0:\n\n nc = NotaCreditoModel(NotaCreditoModel.generarNumero(self.sesion))\n nc.guardar(self.sesion)\n for nro_lnc, nro_lfactura in enumerate(self.detallesDevueltos):\n detalle_nc = DetalleNCModel(nc.numero,nro_lnc+1,self.facturaSeleccionada.numero,nro_lfactura)\n 
detalle_nc.setImporte(self.data[nro_lfactura][3])\n detalle_nc.guardar(self.sesion)\n self.detallesDevueltos[nro_lfactura].devolver(self.sesion) # Devuelve el detalle asociado de la factura\n self.facturaSeleccionada.setNC(nc.numero)\n self.facturaSeleccionada.modificar(self.sesion)\n QtGui.QMessageBox.information(self,\"Aviso\",\"La factura ha sido devuelta\")\n self.objectModified.emit()\n\n cobros = self.facturaSeleccionada.getCobros(self.sesion)\n if len(cobros) == 1 and cobros[0].tipo == \"Efectivo\":\n QtGui.QMessageBox.information(self,\"Devolucion\",\"El importe en efectivo a entregar es de: $%.2f\" % self.calcularTotal())\n\n #Se genera un diccionario con los datos necesarios para imprimir la nota de credito\n data = {}\n data[\"numero\"] = nc.numero\n data[\"fecha\"] = nc.fecha_emision\n data[\"detalles\"] = self.data.values()\n generarNotaCredito(data)\n\n self.facturaSeleccionada=None\n self.productosSeleccionados=0\n self.detallesDevueltos = {}\n self.limpiarVentana()\n self.data = {}\n\n else:\n QtGui.QMessageBox.information(self,\"Devolucion Cliente\",\"No se ha agregado ningun producto para devolver\")\n\n def cancelarOperacion(self):\n \"\"\"\n Anula la Nota de Credito creada, actuliza el stock\n de los productos a sus valores originales y limpia la ventana.\n Si la Nota de Credito no fue creada limpia la ventana.\n :return:\n \"\"\"\n\n signal = QtGui.QMessageBox.warning(self,\"Advertencia\",QtCore.QString.fromUtf8(\"¿Desea cancelar la operación?\"),\\\n QtGui.QMessageBox.Close | QtGui.QMessageBox.Ok)\n if signal == QtGui.QMessageBox.Ok:\n self.data = {}\n self.facturaSeleccionada = None\n self.productosSeleccionados = 0\n self.detallesDevueltos = {}\n self.limpiarVentana()\n\n def cancelarVentana(self):\n self.data = {}\n self.facturaSeleccionada = None\n self.productosSeleccionados = 0\n self.detallesDevueltos = {}\n self.limpiarVentana()\n\nclass ReintegroCliente(CRUDWidget, Ui_vtnReintegroCliente):\n\n \"\"\"\n Clase encargada de modelar la 
funcionalidad de Reintegro al cliente\n\n \"\"\"\n\n plazo = 7\n\n def __init__(self, mdi):\n MdiWidget.__init__(self, mdi)\n self.sesion = self.mdi().window().getSesionBD()\n self.cargarObras()\n self.validadores()\n self.btnBuscarOs.pressed.connect(self.buscarOs)\n self.tableOs.itemDoubleClicked.connect(self.obtenerObra)\n self.btnBuscarFac.pressed.connect(self.buscarFactura)\n self.lineRazon.returnPressed.connect(self.filtrarObra)\n self.lineCuit.returnPressed.connect(self.filtrarObra)\n self.lineNumeroFac.returnPressed.connect(self.buscarFactura)\n self.btnAceptar.pressed.connect(self.confirmarOperacion)\n self.btnCancelar.pressed.connect(self.cancelarOperacion)\n self.tableFactura.itemDoubleClicked.connect(self.agregarProducto)\n self.gbFactura.setEnabled(False)\n self.gbNotaCredito.setEnabled(False)\n self.detallesReintegrables = []\n self.detallesImprimibles = []\n self.obraSocial = None\n self.facturaSeleccionada = None\n\n def filtrarObra(self):\n \"\"\"\n Filtra la tabla de Obras Sociales de acuerdo\n a los criterios de busqueda impuestos\n :return:\n \"\"\"\n razon_social = str(self.lineRazon.text())\n cuit = str(self.lineCuit.text())\n data = self.getAllTabla(self.tableOs)\n\n if razon_social != \"\":\n dataRazon = filter(lambda x: x[0].upper() == razon_social.upper(), data.values())\n else:\n dataRazon = data.values()\n if cuit != \"\":\n dataCuit = filter(lambda x: x[1].upper() == cuit.upper(), dataRazon)\n else:\n dataCuit = dataRazon\n\n for dato in data:\n self.tableOs.setRowHidden(dato,False)\n\n for dato in data:\n if not data[dato] in dataCuit:\n self.tableOs.setRowHidden(dato,True)\n\n def cargarObras(self):\n \"\"\"\n Carga las Obras Sociales disponibles\n en la tabla correspondiente\n :return:\n \"\"\"\n self.cargarObjetos(self.tableOs,\n ObraSocialModel.buscarTodos(\"razon_social\", self.sesion).all(),\n (\"razon_social\", \"cuit\", \"direccion\")\n )\n\n def validadores(self):\n \"\"\"\n Setea los validadores correspondientes a\n los campos 
de la ventana\n :return:\n \"\"\"\n\n camposRequeridos = [getattr(self,\"lineRazon\")]\n ValidarDatos.setValidador(camposRequeridos)\n\n camposRequeridos = [getattr(self,\"lineCuit\")]\n ValidarDatos.setValidador(camposRequeridos)\n\n camposRequeridos = [getattr(self,\"lineNumeroFac\")]\n ValidarDatos.setValidador(camposRequeridos)\n\n def buscarOs(self):\n \"\"\"\n Busca una Obra Social de acuerdo\n a los criterios del usuario\n :return:\n \"\"\"\n\n if self.lineRazon.isEnabled():\n self.filtrarObra()\n\n elif not self.lineRazon.isEnabled() and (self.tableNC.rowCount() != 0 or self.tableFactura.rowCount() != 0):\n QtGui.QMessageBox.information(self,\"Aviso\",\"Imposible cambiar de Obra Social. Ya se ha seleccionado\\\n una\")\n else:\n self.gbNotaCredito.setEnabled(False)\n self.gbFactura.setEnabled(False)\n self.lineRazon.clear()\n self.lineRazon.setEnabled(True)\n self.lineCuit.clear()\n self.lineCuit.setEnabled(True)\n self.tableOs.setEnabled(True)\n\n def obtenerObra(self):\n \"\"\"\n Carga la Obra Social seleccionada\n en los campos correspondientes.\n :return:\n \"\"\"\n rowActual = self.tableOs.currentItem().row()\n self.lineRazon.setText(str(self.tableOs.item(rowActual,0).text()))\n self.lineRazon.setEnabled(False)\n self.obraSocial=str(self.tableOs.item(rowActual,0).text())\n self.lineCuit.setText(str(self.tableOs.item(rowActual,1).text()))\n self.lineCuit.setEnabled(False)\n self.tableOs.setEnabled(False)\n self.gbFactura.setEnabled(True)\n self.gbNotaCredito.setEnabled(True)\n\n def buscarFactura(self):\n \"\"\"\n Busca la factura indica por el usuario.\n En caso de no existir, notifica lo mismo\n :return:\n \"\"\"\n if not self.lineNumeroFac.isEnabled() and self.tableNC.rowCount() != 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"Ya se ha seleccionado una factura\")\n elif not self.lineNumeroFac.isEnabled():\n self.lineNumeroFac.setEnabled(True)\n self.lineNumeroFac.clear()\n self.limpiarTabla(self.tableFactura)\n else:\n 
self.numeroFacturaActual=str(self.lineNumeroFac.text())\n if len(self.numeroFacturaActual)==0:\n self.showMsjEstado(\"No se ha ingresado numero de factura\")\n else:\n self.facturaSeleccionada=FacturaModel.existeFactura(int(self.numeroFacturaActual),self.sesion)\n if self.facturaSeleccionada==None:\n QtGui.QMessageBox.information(self,\"Aviso\",\"La factura seleccionada no existe\")\n elif self.facturaSeleccionada.getObra() != None and self.facturaSeleccionada.getObra() != self.obraSocial:\n QtGui.QMessageBox.information(self,\"Aviso\",\"La Obra Social seleccionada no corresponde con la factura\")\n elif self.facturaSeleccionada.getFechaEmision()+timedelta(days=int(self.plazo))<date.today():\n QtGui.QMessageBox.information(self,\"Aviso\",\"El tiempo permitido para el reintegro ha expirado\")\n elif self.facturaSeleccionada.estaLiquidada(self.sesion):\n QtGui.QMessageBox.information(self,\"Aviso\",\"La factura se encuentra liquidada a la Obra Social\")\n elif self.facturaSeleccionada.getNC()!=None:\n QtGui.QMessageBox.information(self,\"Aviso\",\"La factura ya posee una Nota de Crédito\")\n else:\n self.lineNumeroFac.setEnabled(False)\n if self.facturaSeleccionada.getObra() == None:\n self.cargarObjetos(self.tableFactura,self.facturaSeleccionada.getDetalles(self.obraSocial, self.sesion),\n [\"producto\",\"cantidad\",\"importe\"])\n else:\n self.cargarObjetos(self.tableFactura,self.facturaSeleccionada.getDetallesSinDescuento(self.sesion),\n [\"producto\",\"cantidad\",\"importe\"])\n\n def agregarProducto(self):\n \"\"\"\n Agrega un producto a la Nota de Credito\n :return:\n \"\"\"\n itemActual=self.tableFactura.currentItem()\n producto = int(self.tableFactura.item(itemActual.row(),0).text())\n descuento = DescuentoModel.buscar(DescuentoModel.obra_social,self.sesion,self.obraSocial).\\\n filter(DescuentoModel.producto==producto)[0].descuento\n cantidad = int(self.tableFactura.item(itemActual.row(), 1).text())\n importe = float(self.tableFactura.item(itemActual.row(), 
2).text()) * descuento\n row = self.tableNC.rowCount()\n self.tableNC.insertRow(row)\n self.tableNC.setItem(row, 0, QtGui.QTableWidgetItem(str(producto)))\n self.tableNC.setItem(row, 1, QtGui.QTableWidgetItem(str(cantidad)))\n self.tableNC.setItem(row, 2, QtGui.QTableWidgetItem(str(importe)))\n self.detallesReintegrables.append([int(self.numeroFacturaActual),itemActual.row()+1,descuento,importe])\n self.detallesImprimibles.append([producto,cantidad,descuento,importe])\n self.tableFactura.hideRow(itemActual.row())\n\n def limpiarVentana(self):\n \"\"\"\n Limpia la ventana una vez que la operacion finalizó\n :return:\n \"\"\"\n self.obraSocial = None\n self.facturaSeleccionada = None\n self.detallesReintegrables = []\n self.detallesImprimibles = []\n self.limpiarTabla(self.tableFactura)\n self.limpiarTabla(self.tableNC)\n self.lineCuit.clear()\n self.lineRazon.clear()\n self.lineNumeroFac.clear()\n self.lineCuit.setEnabled(True)\n self.lineRazon.setEnabled(True)\n self.tableOs.setEnabled(True)\n self.lineNumeroFac.setEnabled(True)\n self.gbFactura.setEnabled(False)\n self.gbNotaCredito.setEnabled(False)\n\n def confirmarOperacion(self):\n \"\"\"\n Confirma la operacion y asienta los datos de la\n Nota de Credito en la BD.\n :return:\n \"\"\"\n\n if self.tableNC.rowCount() == 0 :\n QtGui.QMessageBox.information(self,\"Aviso\",QtCore.QString.fromUtf8(\"No se han agregado productos a la Nota de Crédito\"))\n\n else:\n ok = QtGui.QMessageBox.information(self,QtCore.QString.fromUtf8(\"Confirmación\"),\\\n QtCore.QString.fromUtf8(\"¿Desea generar la Nota Crédito?\"),\\\n QtGui.QMessageBox.Cancel, QtGui.QMessageBox.Accepted)\n\n if (ok==1):\n notaCredito = NotaCreditoModel(NotaCredito.generarNumero(self.sesion))\n notaCredito.guardar(self.sesion)\n for lineaNC, data in enumerate(self.detallesReintegrables):\n detalleNC = DetalleNCModel(notaCredito.numero, lineaNC+1, data[0], data[1])\n detalleNC.setImporte(data[3])\n detalleNC.setDescuento(data[2])\n 
detalleNC.guardar(self.sesion)\n QtGui.QMessageBox.information(self,\"Aviso\",QtCore.QString.fromUtf8(\"La Nota de Crédito ha sido generada con éxito\"))\n self.facturaSeleccionada.setNC(notaCredito.numero)\n self.facturaSeleccionada.modificar(self.sesion)\n\n #Se genera un diccionario con los datos necesarios para imprimir la nota de credito\n data = {}\n data[\"numero\"] = notaCredito.numero\n data[\"fecha\"] = notaCredito.fecha_emision\n data[\"detalles\"] = self.detallesImprimibles\n generarNotaCredito(data)\n self.limpiarVentana()\n\n else:\n QtGui.QMessageBox.information(self,\"Aviso\",QtCore.QString.fromUtf8(\"La Nota de Crédito no ha sido generada\"))\n\n def cancelarOperacion(self):\n \"\"\"\n Cancela la operacion en curso y limpia la ventana\n :return:\n \"\"\"\n\n ok = QtGui.QMessageBox.information(self,\"Confirmacion\",\"¿Desea cancelar la operacion?\",\\\n QtGui.QMessageBox.Cancel, QtGui.QMessageBox.Accepted)\n if (ok==1):\n self.limpiarVentana()\n\n def cancelarVentana(self):\n\n self.limpiarVentana()\n\nclass VentaContado(CRUDWidget, Ui_vtnVentaContado):\n\n \"\"\"\n Clase encargada de modelar el comportamiento de Venta al Contado\n\n \"\"\"\n\n def __init__(self,mdi):\n \"\"\"\n Constructor de la clase VentaContado\n :param mdi:\n :return:\n \"\"\"\n MdiWidget.__init__(self, mdi)\n self.sesion = self.mdi().window().getSesionBD()\n self.validadores()\n self.cargar_obras()\n self.lineMedicamento.returnPressed.connect(self.buscarProd)\n self.lineMonodroga.returnPressed.connect(self.buscarProd)\n self.lineCuit.returnPressed.connect(self.buscarObra)\n self.lineObra.returnPressed.connect(self.buscarObra)\n self.tableObra.itemDoubleClicked.connect(self.cargarObra)\n self.tableProductos.itemDoubleClicked.connect(self.agregarProducto)\n self.btnBuscar.pressed.connect(self.limpiarObra)\n self.btnAceptar.pressed.connect(self.confirmarOperacion)\n self.btnCancelar.pressed.connect(self.cancelarOperacion)\n self.btnEliminar.pressed.connect(self.eliminarDetalle)\n 
self.rbtnObra.pressed.connect(self.habilitarObras)\n self.btnBuscar.setEnabled(False)\n self.tableObra.setVisible(False)\n self.lineCuit.setEnabled(False)\n self.lineObra.setEnabled(False)\n self.cargarProductosSinObra()\n self.productosAgregados=0\n self.lotesVentas={}\n self.facturaCobrada=False\n self.obraSocialSeleccionada=None\n self.formapago = None\n self.factura = None\n self.data = {}\n self.detallesTabla = {}\n\n def buscarProd(self):\n \"\"\"\n Filtra la tabla de Productos de acuerdo\n a los criterios de busqueda impuestos\n :return:\n \"\"\"\n medicamento = str(self.lineMedicamento.text())\n monodroga = str(self.lineMonodroga.text())\n data = self.getAllTabla(self.tableProductos)\n\n if medicamento != \"\":\n dataMedic = filter(lambda x: x[1].upper() == medicamento.upper(), data.values())\n else:\n dataMedic = data.values()\n if monodroga != \"\":\n dataMono = filter(lambda x: x[3].upper() == monodroga.upper(), dataMedic)\n else:\n dataMono = dataMedic\n\n for dato in data:\n self.tableProductos.setRowHidden(dato,False)\n\n for dato in data:\n if not data[dato] in dataMono:\n self.tableProductos.setRowHidden(dato,True)\n\n def buscarObra(self):\n \"\"\"\n Filtra la tabla de Obras Sociales de acuerdo\n a los criterios de busqueda impuestos\n :return:\n \"\"\"\n razon_social = str(self.lineObra.text())\n cuit = str(self.lineCuit.text())\n data = self.getAllTabla(self.tableObra)\n\n if razon_social != \"\":\n dataRazon = filter(lambda x: x[0].upper() == razon_social.upper(), data.values())\n else:\n dataRazon = data.values()\n if cuit != \"\":\n dataCuit = filter(lambda x: x[1].upper() == cuit.upper(), dataRazon)\n else:\n dataCuit = dataRazon\n\n for dato in data:\n self.tableObra.setRowHidden(dato,False)\n\n for dato in data:\n if not data[dato] in dataCuit:\n self.tableObra.setRowHidden(dato,True)\n\n def actualizar(self):\n \"\"\"\n Actualiza la informacion de la\n tabla de Productos\n :return:\n \"\"\"\n if self.obraSocialSeleccionada!=None:\n 
self.cargar_productos(self.obraSocialSeleccionada)\n else:\n self.cargarProductosSinObra()\n\n def habilitarObras(self):\n \"\"\"\n Muestra las Obras Sociales si no hay una factura creada.\n Si la factura ya se encuentra creada, notifica que no\n es posible cambiar la Obra Social actual.\n :return:\n \"\"\"\n if self.productosAgregados != 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"Ya se han agregado productos a la factura\")\n else:\n if not self.rbtnObra.isChecked():\n self.btnBuscar.setEnabled(True)\n self.lineObra.setEnabled(True)\n self.lineCuit.setEnabled(True)\n self.tableObra.setVisible(True)\n else:\n self.lineObra.clear()\n self.lineCuit.clear()\n self.btnBuscar.setEnabled(False)\n self.lineObra.setEnabled(False)\n self.lineCuit.setEnabled(False)\n self.tableObra.setVisible(False)\n self.obraSocialSeleccionada=None\n self.cargarProductosSinObra()\n\n def cargarProductosSinObra(self):\n \"\"\"\n Carga en la tabla de Productos todos los productos\n sin descuento de Obra Social\n :return:\n \"\"\"\n\n self.limpiarTabla(self.tableProductos)\n\n ##Cnsulta para obtener todos los productos del sistema, con su correspondiente\n ##codigo de barra, monodroga, descuento, importe\n query=self.sesion.query(ProductoModel.codigo_barra,ProductoModel.id_medicamento,ProductoModel.id_presentacion,MonodrogaModel.nombre,ProductoModel.importe).\\\n join(MedicamentoModel).filter(ProductoModel.id_medicamento==MedicamentoModel.nombre_comercial).\\\n join(MonodrogaModel).filter(MedicamentoModel.id_monodroga==MonodrogaModel.nombre).\\\n filter(ProductoModel.baja==False).order_by(ProductoModel.codigo_barra)\n\n ##Se cargan los datos obtenidos en la tabla de Producto\n for n, obj in enumerate(query):\n self.tableProductos.insertRow(n)\n self.tableProductos.setItem(n, 0, QtGui.QTableWidgetItem(str(obj[0])))\n self.tableProductos.setItem(n, 1, QtGui.QTableWidgetItem(str(obj[1])))\n self.tableProductos.setItem(n, 2, QtGui.QTableWidgetItem(str(obj[2])))\n 
self.tableProductos.setItem(n, 3, QtGui.QTableWidgetItem(str(obj[3])))\n self.tableProductos.setItem(n, 4, QtGui.QTableWidgetItem(str(0)))\n self.tableProductos.setItem(n, 5, QtGui.QTableWidgetItem(str(obj[4])))\n\n ##Se carga la cantidad de cada producto en la tabla\n for row,producto in enumerate(ProductoModel.buscarTodos(ProductoModel.codigo_barra,self.sesion)):\n self.tableProductos.setItem(row,6,QtGui.QTableWidgetItem(str(producto.getCantidad(self.sesion))))\n\n def cargar_productos(self, obraSocial):\n \"\"\"\n Carga en la tabla de Productos todos los\n productos del sistema con los correspondientes descuentos\n de la Obra Social seleccionada\n :param obraSocial:\n :return:\n \"\"\"\n self.limpiarTabla(self.tableProductos)\n\n query=self.sesion.query(ProductoModel.codigo_barra,ProductoModel.id_medicamento,ProductoModel.id_presentacion,MonodrogaModel.nombre,DescuentoModel.descuento,ProductoModel.importe).\\\n join(MedicamentoModel).filter(ProductoModel.id_medicamento==MedicamentoModel.nombre_comercial).\\\n join(MonodrogaModel).filter(MedicamentoModel.id_monodroga==MonodrogaModel.nombre).\\\n join(DescuentoModel).filter(DescuentoModel.producto==ProductoModel.codigo_barra).\\\n filter(DescuentoModel.obra_social==obraSocial,ProductoModel.baja==False).order_by(ProductoModel.codigo_barra)\n\n for n, obj in enumerate(query):\n self.tableProductos.insertRow(n)\n for m, campo in enumerate(obj):\n self.tableProductos.setItem(n, m, QtGui.QTableWidgetItem(str(campo)))\n\n for row,producto in enumerate(ProductoModel.buscarTodos(ProductoModel.codigo_barra,self.sesion)):\n self.tableProductos.setItem(row,6,QtGui.QTableWidgetItem(str(producto.getCantidad(self.sesion))))\n\n def cargarObra(self):\n \"\"\"\n Carga la informacion de la Obra Social\n seleccionada por el usuario\n :return:\n \"\"\"\n rowActual=self.tableObra.currentItem().row()\n self.lineObra.setText(str(self.tableObra.item(rowActual,0).text()))\n 
self.lineCuit.setText(str(self.tableObra.item(rowActual,1).text()))\n self.tableObra.hide()\n self.lineObra.setEnabled(False)\n self.lineCuit.setEnabled(False)\n self.obraSocialSeleccionada = str(self.lineObra.text())\n self.cargar_productos(self.obraSocialSeleccionada)\n self.gbProducto.setVisible(True)\n\n def limpiarObra(self):\n \"\"\"\n Permite buscar las obras sociales si aun\n no hay ninguna seleccionada.\n Limpia los campos correspondientes a las\n Obras Sociales, si ya hay una cargada.\n :return:\n \"\"\"\n\n if self.lineObra.isEnabled():\n self.buscarObra()\n else:\n self.lineCuit.clear()\n self.lineObra.clear()\n self.lineCuit.setEnabled(True)\n self.lineObra.setEnabled(True)\n self.tableObra.setVisible(True)\n\n def validadores(self):\n\n camposRequeridos = [getattr(self,\"lineMonodroga\")]\n ValidarDatos.setValidador(camposRequeridos)\n\n camposRequeridos = [getattr(self,\"lineMedicamento\")]\n ValidarDatos.setValidador(camposRequeridos)\n\n def cargar_obras(self):\n \"\"\"\n Carga todos las obras Sociales en el sistema\n en la tabla de Obras Sociales\n :return:\n \"\"\"\n self.cargarObjetos(self.tableObra,\n ObraSocialModel.buscarTodos(\"razon_social\", self.sesion).all(),\n (\"razon_social\", \"cuit\", \"direccion\")\n )\n\n def descontarCantidad(self,detalle,producto,cantidad):\n \"\"\"\n Actualiza el stock en una determinada cantidad,\n de un producto dado\n :param detalle Detalle de la Factura :\n :param producto Codigo de barra del producto:\n :param cantidad Cantidad a descontar:\n :return:\n \"\"\"\n query=LoteModel.obtenerLoteProducto(producto,self.sesion)\n valores=[]\n for a in query:\n loteProducto=LoteProductoModel.buscarLoteProducto(self.sesion,producto,a.codigo).first()\n if cantidad<=loteProducto.cantidad:\n loteProducto.descontarCantidad(cantidad)\n loteProducto.modificar(self.sesion)\n valores.append([loteProducto,cantidad])\n break\n else:\n cantidad-=loteProducto.cantidad\n valores.append([loteProducto,loteProducto.cantidad])\n 
loteProducto.descontarCantidad(loteProducto.cantidad)\n loteProducto.modificar(self.sesion)\n self.lotesVentas[detalle]=valores\n detalle.agregarLotes(self.sesion,self.lotesVentas[detalle])\n\n def agregarProducto(self):\n \"\"\"\n Agrega un producto seleccionada a la Factura\n :return:\n \"\"\"\n itemActual=self.tableProductos.currentItem()\n cantidad, ok = QtGui.QInputDialog.getInt(self,\"Cantidad\",\"Ingrese cantidad del producto\",1,1,2000,5)\n if not ok:\n self.showMsjEstado(\"No se ha seleccionado cantidad del producto\")\n else:\n cantidadProducto=int(self.tableProductos.item(itemActual.row(),6).text())\n if cantidad>cantidadProducto:\n QtGui.QMessageBox.information(self,\"Aviso\",\"La cantidad ingresada es mayor que la del stock\")\n else:\n if self.productosAgregados == 0 and self.factura == None:\n self.factura=FacturaModel(FacturaModel.generarNumero(self.sesion))\n self.factura.guardar(self.sesion)\n self.productosAgregados+=1\n rowItemActual=itemActual.row()\n rows=self.tableFactura.rowCount()\n self.tableFactura.insertRow(rows)\n\n #--Carga de items en la tabla--*\n producto = int(self.tableProductos.item(rowItemActual,0).text())\n importeActual=float(self.tableProductos.item(rowItemActual,5).text())\n descuentoActual=float(self.tableProductos.item(rowItemActual,4).text())\n subtotal=importeActual*(1-descuentoActual)\n ####-------------------------#####\n detalleFactura=DetalleFacturaModel(self.factura.numero,producto,cantidad,\n subtotal*cantidad,descuentoActual,self.productosAgregados\n )\n self.descontarCantidad(detalleFactura,producto,cantidad)\n self.tableFactura.setItem(rows,0,QtGui.QTableWidgetItem(str(detalleFactura.producto)))\n self.tableFactura.setItem(rows,1,QtGui.QTableWidgetItem(str(detalleFactura.cantidad)))\n self.tableFactura.setItem(rows, 2, QtGui.QTableWidgetItem(str(\"%.2f\"%(subtotal*cantidad))))\n\n detalleFactura.guardar(self.sesion)\n self.detallesTabla[rows] = detalleFactura\n\n self.data[rows] = [\n producto, cantidad, 
subtotal*cantidad, descuentoActual\n ]\n\n self.actualizar()\n self.objectModified.emit()\n\n def eliminarDetalle(self):\n \"\"\"\n Elimina el detalle seleccionado por el usuario y actualiza\n el stock del producto en particular.\n :return:\n \"\"\"\n\n itemActual = self.tableFactura.currentItem()\n if itemActual == None:\n self.showMsjEstado(\"Debe seleccionar un item para dar de baja\")\n else:\n detalle = self.detallesTabla[itemActual.row()]\n for loteVenta in self.lotesVentas[detalle]:\n loteVenta[0].aumentarCantidad(loteVenta[1])\n loteVenta[0].modificar(self.sesion)\n detalle.eliminarLotesAsociados(self.sesion)\n detalle.bajaFisica(self.sesion)\n del self.lotesVentas[detalle]\n del self.data[itemActual.row()]\n self.tableFactura.hideRow(itemActual.row())\n self.actualizar()\n self.productosAgregados -=1\n self.objectModified.emit()\n\n def limpiarVentana(self):\n \"\"\"\n Limpia la ventana actual\n :return:\n \"\"\"\n\n self.productosAgregados=0\n self.lotesVentas={}\n self.facturaCobrada=False\n self.obraSocialSeleccionada=None\n self.formapago = None\n self.factura = None\n self.data = {}\n self.detallesTabla = {}\n self.lineObra.clear()\n self.lineObra.setEnabled(True)\n self.lineCuit.clear()\n self.lineCuit.setEnabled(True)\n self.tableObra.setVisible(False)\n self.rbtnObra.setChecked(False)\n self.limpiarTabla(self.tableProductos)\n self.limpiarTabla(self.tableFactura)\n self.cargarProductosSinObra()\n\n def calcularTotal(self):\n \"\"\"\n Calcula el total a pagar\n :return Total a Pagar:\n \"\"\"\n subtotales=[]\n for row in range(0,self.tableFactura.rowCount()):\n subtotales.append(float(self.tableFactura.item(row,2).text()))\n importeTotal=sum(subtotales)\n return importeTotal\n\n def confirmarOperacion(self):\n \"\"\"\n Confirma la operacion si todo ha sido exitoso.\n De lo contrario notifica que la Factura todavia no ha sido\n cobrada o que no se efectuo ninguna venta\n :return:\n \"\"\"\n if self.productosAgregados == 0:\n 
QtGui.QMessageBox.information(self,\"Aviso\",\"No se ha agregado ningun producto\")\n else:\n ventana = Cobrar(self,self.calcularTotal(),self.factura,self.sesion)\n ventana.exec_()\n if self.facturaCobrada:\n QtGui.QMessageBox.information(self,\"Venta\",\"La venta se ha realizado con exito\")\n data = {}\n data[\"numero\"] = self.factura.numero\n data[\"fecha\"] = self.factura.fecha_emision\n data[\"detalles\"] = self.data.values()\n data[\"formaPago\"] = self.formapago\n generarFactura(data)\n self.factura.setObra(self.obraSocialSeleccionada)\n self.factura.modificar(self.sesion)\n self.limpiarVentana()\n else:\n QtGui.QMessageBox.information(self,\"Aviso\",\"La factura aun no ha sido cobrada\")\n\n def cancelarOperacion(self):\n \"\"\"\n Cancela la operacion actual, y reestablece\n los stocks a sus valores originales\n :return:\n \"\"\"\n\n ok=QtGui.QMessageBox.warning(self,\"Aviso\",\"¿Desea cancelar la operación?\",\\\n QtGui.QMessageBox.Cancel | QtGui.QMessageBox.Ok)\n if ok == QtGui.QMessageBox.Ok:\n if self.factura != None:\n self.factura.anular()\n for detalle in self.lotesVentas:\n for loteVenta in self.lotesVentas[detalle]:\n loteVenta[0].aumentarCantidad(loteVenta[1])\n loteVenta[0].modificar(self.sesion)\n detalle.eliminarLotesAsociados(self.sesion)\n detalle.borrar(self.sesion)\n self.objectModified.emit()\n self.limpiarVentana()\n\n def cancelarVentana(self):\n\n if self.factura != None:\n self.factura.anular()\n for detalle in self.lotesVentas:\n for loteVenta in self.lotesVentas[detalle]:\n loteVenta[0].aumentarCantidad(loteVenta[1])\n loteVenta[0].modificar(self.sesion)\n detalle.eliminarLotesAsociados(self.sesion)\n detalle.borrar(self.sesion)\n self.objectModified.emit()\n self.limpiarVentana()\n\n def addHandlerSignal(self):\n\n self.sender = PoolOfWindows.getVentana(\"VentaConRemito\")\n self.sender.objectModified.connect(self.actualizar)\n self.sender1 = PoolOfWindows.getVentana(\"AltaProducto\")\n 
self.sender1.objectCreated.connect(self.actualizar)\n self.sender2 = PoolOfWindows.getVentana(\"BajaProducto\")\n self.sender2.objectDeleted.connect(self.actualizar)\n self.sender3 = PoolOfWindows.getVentana(\"ModificarProducto\")\n self.sender3.objectModified.connect(self.actualizar)\n self.sender4 = PoolOfWindows.getVentana(\"DevolucionDeCliente\")\n self.sender4.objectModified.connect(self.actualizar)\n self.sender5 = PoolOfWindows.getVentana(\"ModificarRemito\")\n self.sender5.objectModified.connect(self.actualizar)\n self.sender6 = PoolOfWindows.getVentana(\"BajaRemito\")\n self.sender6.objectModified.connect(self.actualizar)\n self.sender7 = PoolOfWindows.getVentana(\"FraccionarProducto\")\n self.sender7.objectModified.connect(self.actualizar)\n self.sender8 = PoolOfWindows.getVentana(\"AltaLote\")\n self.sender8.objectCreated.connect(self.actualizar)\n self.sender9 = PoolOfWindows.getVentana(\"ModificarLote\")\n self.sender9.objectModified.connect(self.actualizar)\n\nclass Cobrar(QtGui.QDialog, Ui_Dialog):\n \"\"\"\n Clase que modela la lógica de cobro de una factura\n \"\"\"\n\n def __init__(self,ventana_padre, total, factura,sesion):\n \"\"\"\n Constuctor de la clase Cobrar\n :param ventana_padre Referncia a la ventana padre:\n :param total Total a pagar:\n :return:\n \"\"\"\n QtGui.QDialog.__init__(self,ventana_padre)\n self.setupUi(self)\n self.btnAceptar.pressed.connect(self.confirmar)\n self.btnCancelar.pressed.connect(self.cancelar)\n self.btnEliminar.pressed.connect(self.eliminar)\n self.rbtnEfectivo.pressed.connect(self.cobroEfectivo)\n self.rbtnNC.pressed.connect(self.cobroNC)\n self.rbtnTC.pressed.connect(self.cobroTC)\n self.rbtnTD.pressed.connect(self.cobroTD)\n self.total_a_pagar = total\n self.padre = ventana_padre\n self.factura = factura\n self.sesion = sesion\n self.actualizar_total()\n self.detalles_cobro = {}\n\n def actualizar_total(self):\n \"\"\"\n Actualiza el importe a pagar en\n el line de la ventana\n :param total:\n :return:\n 
\"\"\"\n self.lblImporte.setText(\"Saldo Restante: $%.2f\" % self.total_a_pagar)\n\n def cobroNC(self):\n \"\"\"\n Se encarga de efectuar el cobro con NC\n :return:\n \"\"\"\n if self.total_a_pagar == 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El saldo restante a pagar es cero\")\n else:\n self.rbtnNC.setChecked(True)\n totalFactura = self.total_a_pagar\n numero,ok = QtGui.QInputDialog.getText(self,\"Cobro c/Nota de Crédito\",\"Ingrese número de Nota de Crédito\")\n if ok:\n notaCredito = NotaCreditoModel.getNotaCredito(self.padre.sesion,int(numero))\n if notaCredito == None:\n QtGui.QMessageBox.information(self,\"Aviso\",\"La Nota de Crédito ingresada no existe\")\n elif notaCredito.getTotal(self.padre.sesion) < totalFactura:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El monto de la Nota de Credito es insuficiente\")\n elif notaCredito.getTotal(self.padre.sesion) - CobroClienteModel.getTotalNC(self.padre.sesion,notaCredito.numero) < totalFactura:\n dif = notaCredito.getTotal(self.padre.sesion) - CobroClienteModel.getTotalNC(self.padre.sesion,notaCredito.numero)\n QtGui.QMessageBox.information(self,\"Aviso\",\"La Nota solo posee $\" + str(dif))\n else:\n temp = [\"Nota de Crédito\",self.total_a_pagar,notaCredito.numero]\n self.detalles_cobro[self.tablePagos.rowCount()] = temp\n self.total_a_pagar = 0\n self.actualizar_total()\n self.actualizar_tabla()\n\n def cobroTC(self):\n \"\"\"\n Se encarga de efectuar el cobro con Tarjeta de Crédito\n :return:\n \"\"\"\n if self.total_a_pagar == 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El saldo restante a pagar es cero\")\n else:\n monto_a_pagar, ok = QtGui.QInputDialog.getDouble(self,\"Cobro Tarjeta Crédito\",\"Ingrese monto a pagar\",0,0,2000,2)\n if ok:\n if monto_a_pagar > self.total_a_pagar:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El monto ingresado es mayor al total a pagar\")\n elif monto_a_pagar == 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El monto ingresado no puede ser 
cero\")\n else:\n temp = [\"Tarjeta de Crédito\",monto_a_pagar]\n self.detalles_cobro[self.tablePagos.rowCount()] = temp\n self.total_a_pagar -= monto_a_pagar\n self.actualizar_total()\n self.actualizar_tabla()\n\n def cobroTD(self):\n \"\"\"\n Se encarga de efectuar el cobro con Tarjeta de Débito\n :return:\n \"\"\"\n if self.total_a_pagar == 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El saldo restante a pagar es cero\")\n else:\n monto_a_pagar, ok = QtGui.QInputDialog.getDouble(self,\"Cobro Tarjeta Débito\",\"Ingrese monto a pagar\",0,0,2000,2)\n if ok:\n if monto_a_pagar > self.total_a_pagar:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El monto ingresado es mayor al total a pagar\")\n elif monto_a_pagar == 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El monto ingresado no puede ser cero\")\n else:\n temp = [\"Tarjeta de Débito\",monto_a_pagar]\n self.detalles_cobro[self.tablePagos.rowCount()] = temp\n self.total_a_pagar -= monto_a_pagar\n self.actualizar_total()\n self.actualizar_tabla()\n\n def cobroEfectivo(self):\n \"\"\"\n Se encarga de efectuar el cobro en efectivo del cliente\n :return:\n \"\"\"\n if self.total_a_pagar == 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El saldo restante a pagar es cero\")\n else:\n self.rbtnEfectivo.setChecked(True)\n monto_a_pagar, ok = QtGui.QInputDialog.getDouble(self,\"Cobro Efectivo\",\"Ingrese monto a pagar\",0,0,2000,2)\n\n if ok:\n if monto_a_pagar >= self.total_a_pagar:\n QtGui.QMessageBox.information(self,\"Cobro Efectivo\",\"Su vuelto es:%.2f\" % (monto_a_pagar - self.total_a_pagar))\n temp = [\"Efectivo\",monto_a_pagar]\n self.detalles_cobro[self.tablePagos.rowCount()] = temp\n self.total_a_pagar = 0\n elif monto_a_pagar == 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El monto ingresado no puede ser cero\")\n else:\n temp = [\"Efectivo\",monto_a_pagar]\n self.detalles_cobro[self.tablePagos.rowCount()] = temp\n self.total_a_pagar -= monto_a_pagar\n\n self.actualizar_total()\n 
self.actualizar_tabla()\n\n def eliminar(self):\n \"\"\"\n Elimina un pago determinado\n :return:\n \"\"\"\n\n itemActual = self.tablePagos.currentItem()\n if itemActual == None:\n self.showMsjEstado(\"Debe seleccionar un para poder eliminar\")\n else:\n monto = self.detalles_cobro[itemActual.row()][1]\n del self.detalles_cobro[itemActual.row()]\n self.total_a_pagar += monto\n self.tablePagos.setRowHidden(itemActual.row(),True)\n self.actualizar_total()\n\n def actualizar_tabla(self):\n \"\"\"\n Actualiza la tabla de cobros\n :return:\n \"\"\"\n\n self.padre.limpiarTabla(self.tablePagos)\n for row, cobro in enumerate(self.detalles_cobro.values()):\n self.tablePagos.insertRow(row)\n self.tablePagos.setItem(row,0,QtGui.QTableWidgetItem(cobro[0]))\n self.tablePagos.setItem(row,1,QtGui.QTableWidgetItem(\"$\"+str(cobro[1])))\n\n def confirmar(self):\n \"\"\"\n Confirma los cobros efectuados\n :return Tupla con la señal indicando exito y lista de cobros:\n \"\"\"\n\n if self.total_a_pagar == 0:\n\n for cobro in self.detalles_cobro.values():\n if len(cobro) == 3:\n cobroCliente = CobroClienteModel(CobroClienteModel.obtenerNumero(self.sesion),self.factura.numero,\\\n cobro[0],cobro[1])\n cobroCliente.setNC(cobro[2])\n else:\n cobroCliente = CobroClienteModel(CobroClienteModel.obtenerNumero(self.sesion),self.factura.numero,\\\n cobro[0],cobro[1])\n\n cobroCliente.guardar(self.sesion)\n\n if len(self.detalles_cobro.values())>1:\n self.padre.formapago = \"Varios\"\n else:\n self.padre.formapago = self.detalles_cobro.values()[0][0]\n\n self.padre.facturaCobrada = True\n self.accept()\n else:\n QtGui.QMessageBox.information(self,\"Aviso\",\"Restan $%.2f por pagar\" % self.total_a_pagar)\n\n def cancelar(self):\n \"\"\"\n Cancela la operacion de cobrar\n :return Tupla con la señal indicando cancelacion y None:\n \"\"\"\n\n signal = QtGui.QMessageBox.information(self,\"Aviso\",\"¿Desea cancelar la operacion?\",\\\n QtGui.QMessageBox.Close | QtGui.QMessageBox.Ok)\n if signal == 
QtGui.QMessageBox.Ok:\n self.detalles_cobro = {}\n self.padre.limpiarTabla(self.tablePagos)\n self.close()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Ui_aboutDialog(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ui_aboutDialog(object):
    def setupUi(self, aboutDialog):
        """Create and position the static widgets of the About dialog.

        :param aboutDialog: the dialog instance being populated.
        """
        aboutDialog.setObjectName('aboutDialog')
        aboutDialog.resize(400, 175)
        # "About" heading label.
        self.label = QtWidgets.QLabel(aboutDialog)
        self.label.setGeometry(QtCore.QRect(20, 10, 51, 16))
        self.label.setObjectName('label')
        # Author line.
        self.label_2 = QtWidgets.QLabel(aboutDialog)
        self.label_2.setGeometry(QtCore.QRect(40, 40, 201, 21))
        self.label_2.setObjectName('label_2')
        # Homepage link; opens in the system browser instead of in-app.
        self.label_3 = QtWidgets.QLabel(aboutDialog)
        self.label_3.setGeometry(QtCore.QRect(40, 70, 261, 21))
        self.label_3.setOpenExternalLinks(True)
        self.label_3.setObjectName('label_3')
        # Version line.
        self.label_4 = QtWidgets.QLabel(aboutDialog)
        self.label_4.setGeometry(QtCore.QRect(40, 100, 91, 21))
        self.label_4.setObjectName('label_4')
        # License line.
        self.label_5 = QtWidgets.QLabel(aboutDialog)
        self.label_5.setGeometry(QtCore.QRect(40, 130, 91, 21))
        self.label_5.setObjectName('label_5')
        # Texts are assigned in retranslateUi so they can be localized.
        self.retranslateUi(aboutDialog)
        QtCore.QMetaObject.connectSlotsByName(aboutDialog)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ui_aboutDialog(object):
    """UI builder for the About dialog (pyuic-style setup/retranslate pair)."""

    # (attribute name, geometry rect, whether the label opens links externally)
    _LABELS = (
        ('label', (20, 10, 51, 16), False),
        ('label_2', (40, 40, 201, 21), False),
        ('label_3', (40, 70, 261, 21), True),
        ('label_4', (40, 100, 91, 21), False),
        ('label_5', (40, 130, 91, 21), False),
    )

    def setupUi(self, aboutDialog):
        """Create and position every widget of *aboutDialog*."""
        aboutDialog.setObjectName('aboutDialog')
        aboutDialog.resize(400, 175)
        for attr_name, rect, external in self._LABELS:
            widget = QtWidgets.QLabel(aboutDialog)
            widget.setGeometry(QtCore.QRect(*rect))
            if external:
                widget.setOpenExternalLinks(True)
            widget.setObjectName(attr_name)
            setattr(self, attr_name, widget)
        self.retranslateUi(aboutDialog)
        QtCore.QMetaObject.connectSlotsByName(aboutDialog)

    def retranslateUi(self, aboutDialog):
        """Assign the (translatable) window title and label texts."""
        _translate = QtCore.QCoreApplication.translate
        aboutDialog.setWindowTitle(_translate('aboutDialog', 'About'))
        self.label.setText(_translate('aboutDialog', 'About'))
        self.label_2.setText(_translate('aboutDialog',
            'Author: Andrew Christiansen'))
        self.label_3.setText(_translate('aboutDialog',
            'Homepage: <a href="https://github.com/drewtchrist/pylabeler">https://github.com/drewtchrist/pylabeler</a>'
            ))
        self.label_4.setText(_translate('aboutDialog', 'Version: 0.1.0'))
        self.label_5.setText(_translate('aboutDialog', 'License: MIT'))
<|reserved_special_token_1|>
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_aboutDialog(object):
    """UI builder for the About dialog (pyuic-style setup/retranslate pair)."""

    def setupUi(self, aboutDialog):
        """Create and position the static widgets of the About dialog."""
        aboutDialog.setObjectName('aboutDialog')
        aboutDialog.resize(400, 175)
        # "About" heading label.
        self.label = QtWidgets.QLabel(aboutDialog)
        self.label.setGeometry(QtCore.QRect(20, 10, 51, 16))
        self.label.setObjectName('label')
        # Author line.
        self.label_2 = QtWidgets.QLabel(aboutDialog)
        self.label_2.setGeometry(QtCore.QRect(40, 40, 201, 21))
        self.label_2.setObjectName('label_2')
        # Homepage link; opens in the system browser instead of in-app.
        self.label_3 = QtWidgets.QLabel(aboutDialog)
        self.label_3.setGeometry(QtCore.QRect(40, 70, 261, 21))
        self.label_3.setOpenExternalLinks(True)
        self.label_3.setObjectName('label_3')
        # Version line.
        self.label_4 = QtWidgets.QLabel(aboutDialog)
        self.label_4.setGeometry(QtCore.QRect(40, 100, 91, 21))
        self.label_4.setObjectName('label_4')
        # License line.
        self.label_5 = QtWidgets.QLabel(aboutDialog)
        self.label_5.setGeometry(QtCore.QRect(40, 130, 91, 21))
        self.label_5.setObjectName('label_5')
        # Texts are assigned in retranslateUi so they can be localized.
        self.retranslateUi(aboutDialog)
        QtCore.QMetaObject.connectSlotsByName(aboutDialog)

    def retranslateUi(self, aboutDialog):
        """Assign the (translatable) window title and label texts."""
        _translate = QtCore.QCoreApplication.translate
        aboutDialog.setWindowTitle(_translate('aboutDialog', 'About'))
        self.label.setText(_translate('aboutDialog', 'About'))
        self.label_2.setText(_translate('aboutDialog',
            'Author: Andrew Christiansen'))
        self.label_3.setText(_translate('aboutDialog',
            'Homepage: <a href="https://github.com/drewtchrist/pylabeler">https://github.com/drewtchrist/pylabeler</a>'
            ))
        self.label_4.setText(_translate('aboutDialog', 'Version: 0.1.0'))
        self.label_5.setText(_translate('aboutDialog', 'License: MIT'))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/about.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_aboutDialog(object):
    """UI builder for the About dialog, generated by pyuic5 from ui/about.ui."""

    def setupUi(self, aboutDialog):
        """Create and position the static widgets of the About dialog."""
        aboutDialog.setObjectName("aboutDialog")
        aboutDialog.resize(400, 175)
        # "About" heading label.
        self.label = QtWidgets.QLabel(aboutDialog)
        self.label.setGeometry(QtCore.QRect(20, 10, 51, 16))
        self.label.setObjectName("label")
        # Author line.
        self.label_2 = QtWidgets.QLabel(aboutDialog)
        self.label_2.setGeometry(QtCore.QRect(40, 40, 201, 21))
        self.label_2.setObjectName("label_2")
        # Homepage link; opens in the system browser instead of in-app.
        self.label_3 = QtWidgets.QLabel(aboutDialog)
        self.label_3.setGeometry(QtCore.QRect(40, 70, 261, 21))
        self.label_3.setOpenExternalLinks(True)
        self.label_3.setObjectName("label_3")
        # Version line.
        self.label_4 = QtWidgets.QLabel(aboutDialog)
        self.label_4.setGeometry(QtCore.QRect(40, 100, 91, 21))
        self.label_4.setObjectName("label_4")
        # License line.
        self.label_5 = QtWidgets.QLabel(aboutDialog)
        self.label_5.setGeometry(QtCore.QRect(40, 130, 91, 21))
        self.label_5.setObjectName("label_5")
        # Texts are assigned in retranslateUi so they can be localized.
        self.retranslateUi(aboutDialog)
        QtCore.QMetaObject.connectSlotsByName(aboutDialog)

    def retranslateUi(self, aboutDialog):
        """Assign the (translatable) window title and label texts."""
        _translate = QtCore.QCoreApplication.translate
        aboutDialog.setWindowTitle(_translate("aboutDialog", "About"))
        self.label.setText(_translate("aboutDialog", "About"))
        self.label_2.setText(_translate("aboutDialog", "Author: Andrew Christiansen"))
        self.label_3.setText(_translate("aboutDialog", "Homepage: <a href=\"https://github.com/drewtchrist/pylabeler\">https://github.com/drewtchrist/pylabeler</a>"))
        self.label_4.setText(_translate("aboutDialog", "Version: 0.1.0"))
        self.label_5.setText(_translate("aboutDialog", "License: MIT"))
|
flexible
|
{
"blob_id": "25b3defc8410c72c7c6f25288af91bd0c826f2ed",
"index": 6051,
"step-1": "<mask token>\n\n\nclass Ui_aboutDialog(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_aboutDialog(object):\n\n def setupUi(self, aboutDialog):\n aboutDialog.setObjectName('aboutDialog')\n aboutDialog.resize(400, 175)\n self.label = QtWidgets.QLabel(aboutDialog)\n self.label.setGeometry(QtCore.QRect(20, 10, 51, 16))\n self.label.setObjectName('label')\n self.label_2 = QtWidgets.QLabel(aboutDialog)\n self.label_2.setGeometry(QtCore.QRect(40, 40, 201, 21))\n self.label_2.setObjectName('label_2')\n self.label_3 = QtWidgets.QLabel(aboutDialog)\n self.label_3.setGeometry(QtCore.QRect(40, 70, 261, 21))\n self.label_3.setOpenExternalLinks(True)\n self.label_3.setObjectName('label_3')\n self.label_4 = QtWidgets.QLabel(aboutDialog)\n self.label_4.setGeometry(QtCore.QRect(40, 100, 91, 21))\n self.label_4.setObjectName('label_4')\n self.label_5 = QtWidgets.QLabel(aboutDialog)\n self.label_5.setGeometry(QtCore.QRect(40, 130, 91, 21))\n self.label_5.setObjectName('label_5')\n self.retranslateUi(aboutDialog)\n QtCore.QMetaObject.connectSlotsByName(aboutDialog)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Ui_aboutDialog(object):\n\n def setupUi(self, aboutDialog):\n aboutDialog.setObjectName('aboutDialog')\n aboutDialog.resize(400, 175)\n self.label = QtWidgets.QLabel(aboutDialog)\n self.label.setGeometry(QtCore.QRect(20, 10, 51, 16))\n self.label.setObjectName('label')\n self.label_2 = QtWidgets.QLabel(aboutDialog)\n self.label_2.setGeometry(QtCore.QRect(40, 40, 201, 21))\n self.label_2.setObjectName('label_2')\n self.label_3 = QtWidgets.QLabel(aboutDialog)\n self.label_3.setGeometry(QtCore.QRect(40, 70, 261, 21))\n self.label_3.setOpenExternalLinks(True)\n self.label_3.setObjectName('label_3')\n self.label_4 = QtWidgets.QLabel(aboutDialog)\n self.label_4.setGeometry(QtCore.QRect(40, 100, 91, 21))\n self.label_4.setObjectName('label_4')\n self.label_5 = QtWidgets.QLabel(aboutDialog)\n self.label_5.setGeometry(QtCore.QRect(40, 130, 91, 21))\n self.label_5.setObjectName('label_5')\n self.retranslateUi(aboutDialog)\n QtCore.QMetaObject.connectSlotsByName(aboutDialog)\n\n def retranslateUi(self, aboutDialog):\n _translate = QtCore.QCoreApplication.translate\n aboutDialog.setWindowTitle(_translate('aboutDialog', 'About'))\n self.label.setText(_translate('aboutDialog', 'About'))\n self.label_2.setText(_translate('aboutDialog',\n 'Author: Andrew Christiansen'))\n self.label_3.setText(_translate('aboutDialog',\n 'Homepage: <a href=\"https://github.com/drewtchrist/pylabeler\">https://github.com/drewtchrist/pylabeler</a>'\n ))\n self.label_4.setText(_translate('aboutDialog', 'Version: 0.1.0'))\n self.label_5.setText(_translate('aboutDialog', 'License: MIT'))\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_aboutDialog(object):\n\n def setupUi(self, aboutDialog):\n aboutDialog.setObjectName('aboutDialog')\n aboutDialog.resize(400, 175)\n self.label = QtWidgets.QLabel(aboutDialog)\n self.label.setGeometry(QtCore.QRect(20, 10, 51, 16))\n self.label.setObjectName('label')\n self.label_2 = QtWidgets.QLabel(aboutDialog)\n self.label_2.setGeometry(QtCore.QRect(40, 40, 201, 21))\n self.label_2.setObjectName('label_2')\n self.label_3 = QtWidgets.QLabel(aboutDialog)\n self.label_3.setGeometry(QtCore.QRect(40, 70, 261, 21))\n self.label_3.setOpenExternalLinks(True)\n self.label_3.setObjectName('label_3')\n self.label_4 = QtWidgets.QLabel(aboutDialog)\n self.label_4.setGeometry(QtCore.QRect(40, 100, 91, 21))\n self.label_4.setObjectName('label_4')\n self.label_5 = QtWidgets.QLabel(aboutDialog)\n self.label_5.setGeometry(QtCore.QRect(40, 130, 91, 21))\n self.label_5.setObjectName('label_5')\n self.retranslateUi(aboutDialog)\n QtCore.QMetaObject.connectSlotsByName(aboutDialog)\n\n def retranslateUi(self, aboutDialog):\n _translate = QtCore.QCoreApplication.translate\n aboutDialog.setWindowTitle(_translate('aboutDialog', 'About'))\n self.label.setText(_translate('aboutDialog', 'About'))\n self.label_2.setText(_translate('aboutDialog',\n 'Author: Andrew Christiansen'))\n self.label_3.setText(_translate('aboutDialog',\n 'Homepage: <a href=\"https://github.com/drewtchrist/pylabeler\">https://github.com/drewtchrist/pylabeler</a>'\n ))\n self.label_4.setText(_translate('aboutDialog', 'Version: 0.1.0'))\n self.label_5.setText(_translate('aboutDialog', 'License: MIT'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'ui/about.ui'\n#\n# Created by: PyQt5 UI code generator 5.15.4\n#\n# WARNING: Any manual changes made to this file will be lost when pyuic5 is\n# run again. Do not edit this file unless you know what you are doing.\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_aboutDialog(object):\n def setupUi(self, aboutDialog):\n aboutDialog.setObjectName(\"aboutDialog\")\n aboutDialog.resize(400, 175)\n self.label = QtWidgets.QLabel(aboutDialog)\n self.label.setGeometry(QtCore.QRect(20, 10, 51, 16))\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(aboutDialog)\n self.label_2.setGeometry(QtCore.QRect(40, 40, 201, 21))\n self.label_2.setObjectName(\"label_2\")\n self.label_3 = QtWidgets.QLabel(aboutDialog)\n self.label_3.setGeometry(QtCore.QRect(40, 70, 261, 21))\n self.label_3.setOpenExternalLinks(True)\n self.label_3.setObjectName(\"label_3\")\n self.label_4 = QtWidgets.QLabel(aboutDialog)\n self.label_4.setGeometry(QtCore.QRect(40, 100, 91, 21))\n self.label_4.setObjectName(\"label_4\")\n self.label_5 = QtWidgets.QLabel(aboutDialog)\n self.label_5.setGeometry(QtCore.QRect(40, 130, 91, 21))\n self.label_5.setObjectName(\"label_5\")\n\n self.retranslateUi(aboutDialog)\n QtCore.QMetaObject.connectSlotsByName(aboutDialog)\n\n def retranslateUi(self, aboutDialog):\n _translate = QtCore.QCoreApplication.translate\n aboutDialog.setWindowTitle(_translate(\"aboutDialog\", \"About\"))\n self.label.setText(_translate(\"aboutDialog\", \"About\"))\n self.label_2.setText(_translate(\"aboutDialog\", \"Author: Andrew Christiansen\"))\n self.label_3.setText(_translate(\"aboutDialog\", \"Homepage: <a href=\\\"https://github.com/drewtchrist/pylabeler\\\">https://github.com/drewtchrist/pylabeler</a>\"))\n self.label_4.setText(_translate(\"aboutDialog\", \"Version: 0.1.0\"))\n self.label_5.setText(_translate(\"aboutDialog\", \"License: MIT\"))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@periodic_task(run_every=crontab(minute='*/10'), name='scrape_espn_feed',
    ignore_result=True)
def scrape_espn_feed():
    """
    Scrape the ESPN scores feed every 10 minutes and persist MLB scores.

    NOTE(review): the original docstring ("Saves latest image from Flickr")
    appears copy-pasted from an unrelated task; the body scrapes scores.
    """
    thescores = doScoresScrape()
    fixScores(thescores, 'MLB')
    logger.info('Scores scraped')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = get_task_logger(__name__)
@periodic_task(run_every=crontab(minute='*/10'), name='scrape_espn_feed',
    ignore_result=True)
def scrape_espn_feed():
    """
    Scrape the ESPN scores feed every 10 minutes and persist MLB scores.

    NOTE(review): the original docstring ("Saves latest image from Flickr")
    appears copy-pasted from an unrelated task; the body scrapes scores.
    """
    thescores = doScoresScrape()
    fixScores(thescores, 'MLB')
    logger.info('Scores scraped')
<|reserved_special_token_1|>
from celery.task.schedules import crontab
from celery.decorators import periodic_task
from celery.utils.log import get_task_logger
from bbapp.scripts.getScores import doScoresScrape, fixScores
logger = get_task_logger(__name__)
@periodic_task(run_every=crontab(minute='*/10'), name='scrape_espn_feed',
    ignore_result=True)
def scrape_espn_feed():
    """
    Scrape the ESPN scores feed every 10 minutes and persist MLB scores.

    NOTE(review): the original docstring ("Saves latest image from Flickr")
    appears copy-pasted from an unrelated task; the body scrapes scores.
    """
    thescores = doScoresScrape()
    fixScores(thescores, 'MLB')
    logger.info('Scores scraped')
<|reserved_special_token_1|>
from celery.task.schedules import crontab
from celery.decorators import periodic_task
from celery.utils.log import get_task_logger
from bbapp.scripts.getScores import doScoresScrape, fixScores
logger = get_task_logger(__name__)
@periodic_task(
    run_every=(crontab(minute='*/10')),
    name="scrape_espn_feed",
    ignore_result=True
)
def scrape_espn_feed():
    """
    Scrape the ESPN scores feed every 10 minutes and persist MLB scores.

    NOTE(review): the original docstring ("Saves latest image from Flickr")
    appears copy-pasted from an unrelated task; the body scrapes scores.
    """
    thescores = doScoresScrape()
    fixScores(thescores, 'MLB')
    logger.info("Scores scraped")
|
flexible
|
{
"blob_id": "a9a067ee3b176d2f2ca558b69ce2bc598bb31d22",
"index": 4501,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@periodic_task(run_every=crontab(minute='*/10'), name='scrape_espn_feed',\n ignore_result=True)\ndef scrape_espn_feed():\n \"\"\"\n Saves latest image from Flickr\n \"\"\"\n thescores = doScoresScrape()\n fixScores(thescores, 'MLB')\n logger.info('Scores scraped')\n",
"step-3": "<mask token>\nlogger = get_task_logger(__name__)\n\n\n@periodic_task(run_every=crontab(minute='*/10'), name='scrape_espn_feed',\n ignore_result=True)\ndef scrape_espn_feed():\n \"\"\"\n Saves latest image from Flickr\n \"\"\"\n thescores = doScoresScrape()\n fixScores(thescores, 'MLB')\n logger.info('Scores scraped')\n",
"step-4": "from celery.task.schedules import crontab\nfrom celery.decorators import periodic_task\nfrom celery.utils.log import get_task_logger\nfrom bbapp.scripts.getScores import doScoresScrape, fixScores\nlogger = get_task_logger(__name__)\n\n\n@periodic_task(run_every=crontab(minute='*/10'), name='scrape_espn_feed',\n ignore_result=True)\ndef scrape_espn_feed():\n \"\"\"\n Saves latest image from Flickr\n \"\"\"\n thescores = doScoresScrape()\n fixScores(thescores, 'MLB')\n logger.info('Scores scraped')\n",
"step-5": "from celery.task.schedules import crontab\nfrom celery.decorators import periodic_task\nfrom celery.utils.log import get_task_logger\n\n\nfrom bbapp.scripts.getScores import doScoresScrape, fixScores\n\nlogger = get_task_logger(__name__)\n\n\n@periodic_task(\n run_every=(crontab(minute='*/10')),\n name=\"scrape_espn_feed\",\n ignore_result=True\n)\ndef scrape_espn_feed():\n \"\"\"\n Saves latest image from Flickr\n \"\"\"\n thescores = doScoresScrape()\n fixScores(thescores, 'MLB')\n logger.info(\"Scores scraped\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def solution(input, *_extra):
    """Return the largest k such that the values can cover ranks 1..k.

    Greedy: walk the values in ascending order and advance the target
    rank ``k`` whenever the current value reaches it.

    :param input: iterable of integers (name kept for call compatibility
        even though it shadows the builtin).
    :param _extra: extra positional argument lists, accepted and ignored.
        Callers in this file invoke ``solution(list1, list2)``, which
        raised TypeError with the one-argument signature.
        NOTE(review): confirm the second list truly plays no role.
    :return: count of ranks 1..k that could be matched.
    """
    k = 1
    for v in sorted(input):
        if v >= k:
            k += 1
    return k - 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solution(input, *_extra):
    """Return the largest k such that the values can cover ranks 1..k.

    Greedy: walk the values in ascending order and advance the target
    rank ``k`` whenever the current value reaches it.

    :param input: iterable of integers (name kept for call compatibility
        even though it shadows the builtin).
    :param _extra: extra positional argument lists, accepted and ignored.
        Callers in this file invoke ``solution(list1, list2)``, which
        raised TypeError with the one-argument signature.
        NOTE(review): confirm the second list truly plays no role.
    :return: count of ranks 1..k that could be matched.
    """
    k = 1
    for v in sorted(input):
        if v >= k:
            k += 1
    return k - 1
<|reserved_special_token_0|>
# Per test case: skip the header line, parse the two whitespace-separated
# integer lists, solve, and print in Code-Jam "Case #i: ans" format.
for i in range(int(testcase)):
    # First line of each case is consumed but unused — presumably the
    # list sizes; verify against the input specification.
    sys.stdin.readline()
    line1 = sys.stdin.readline().rstrip('\n')
    line2 = sys.stdin.readline().rstrip('\n')
    ans = solution([int(x) for x in line1.split(' ')], [int(x) for x in
        line2.split(' ')])
    print('Case #{}: {}'.format(i + 1, ans))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solution(input, *_extra):
    """Return the largest k such that the values can cover ranks 1..k.

    Greedy: walk the values in ascending order and advance the target
    rank ``k`` whenever the current value reaches it.

    :param input: iterable of integers (name kept for call compatibility
        even though it shadows the builtin).
    :param _extra: extra positional argument lists, accepted and ignored.
        Callers in this file invoke ``solution(list1, list2)``, which
        raised TypeError with the one-argument signature.
        NOTE(review): confirm the second list truly plays no role.
    :return: count of ranks 1..k that could be matched.
    """
    k = 1
    for v in sorted(input):
        if v >= k:
            k += 1
    return k - 1
# Number of test cases, given on the first input line.
testcase = sys.stdin.readline()
# Per test case: skip the header line, parse the two whitespace-separated
# integer lists, solve, and print in Code-Jam "Case #i: ans" format.
for i in range(int(testcase)):
    # First line of each case is consumed but unused — presumably the
    # list sizes; verify against the input specification.
    sys.stdin.readline()
    line1 = sys.stdin.readline().rstrip('\n')
    line2 = sys.stdin.readline().rstrip('\n')
    ans = solution([int(x) for x in line1.split(' ')], [int(x) for x in
        line2.split(' ')])
    print('Case #{}: {}'.format(i + 1, ans))
<|reserved_special_token_1|>
import sys
def solution(input, *_extra):
    """Return the largest k such that the values can cover ranks 1..k.

    Greedy: walk the values in ascending order and advance the target
    rank ``k`` whenever the current value reaches it.

    :param input: iterable of integers (name kept for call compatibility
        even though it shadows the builtin).
    :param _extra: extra positional argument lists, accepted and ignored.
        Callers in this file invoke ``solution(list1, list2)``, which
        raised TypeError with the one-argument signature.
        NOTE(review): confirm the second list truly plays no role.
    :return: count of ranks 1..k that could be matched.
    """
    k = 1
    for v in sorted(input):
        if v >= k:
            k += 1
    return k - 1
# Number of test cases, given on the first input line.
testcase = sys.stdin.readline()
# Per test case: skip the header line, parse the two whitespace-separated
# integer lists, solve, and print in Code-Jam "Case #i: ans" format.
for i in range(int(testcase)):
    # First line of each case is consumed but unused — presumably the
    # list sizes; verify against the input specification.
    sys.stdin.readline()
    line1 = sys.stdin.readline().rstrip('\n')
    line2 = sys.stdin.readline().rstrip('\n')
    ans = solution([int(x) for x in line1.split(' ')], [int(x) for x in
        line2.split(' ')])
    print('Case #{}: {}'.format(i + 1, ans))
<|reserved_special_token_1|>
import sys
def solution(input, *_extra):
    """Return the largest k such that the values can cover ranks 1..k.

    Greedy: walk the values in ascending order and advance the target
    rank ``k`` whenever the current value reaches it.

    :param input: iterable of integers (name kept for call compatibility
        even though it shadows the builtin).
    :param _extra: extra positional argument lists, accepted and ignored.
        Callers in this file invoke ``solution(list1, list2)``, which
        raised TypeError with the one-argument signature.
        NOTE(review): confirm the second list truly plays no role.
    :return: count of ranks 1..k that could be matched.
    """
    k = 1
    for v in sorted(input):
        if v >= k:
            k += 1
    return k - 1
# Number of test cases, given on the first input line.
testcase = sys.stdin.readline()
# Per test case: skip the header line, parse the two whitespace-separated
# integer lists, solve, and print in Code-Jam "Case #i: ans" format.
for i in range(int(testcase)):
    # First line of each case is consumed but unused — presumably the
    # list sizes; verify against the input specification.
    sys.stdin.readline()
    line1 = sys.stdin.readline().rstrip('\n')
    line2 = sys.stdin.readline().rstrip('\n')
    ans = solution(
        [ int(x) for x in line1.split(' ') ],
        [ int(x) for x in line2.split(' ') ],
    )
    print("Case #{}: {}".format(i+1, ans))
|
flexible
|
{
"blob_id": "a89724be31b4ccc1a3d83305509d9624da364a0c",
"index": 6004,
"step-1": "<mask token>\n\n\ndef solution(input):\n k = 1\n for v in sorted(input):\n if v >= k:\n k += 1\n return k - 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef solution(input):\n k = 1\n for v in sorted(input):\n if v >= k:\n k += 1\n return k - 1\n\n\n<mask token>\nfor i in range(int(testcase)):\n sys.stdin.readline()\n line1 = sys.stdin.readline().rstrip('\\n')\n line2 = sys.stdin.readline().rstrip('\\n')\n ans = solution([int(x) for x in line1.split(' ')], [int(x) for x in\n line2.split(' ')])\n print('Case #{}: {}'.format(i + 1, ans))\n",
"step-3": "<mask token>\n\n\ndef solution(input):\n k = 1\n for v in sorted(input):\n if v >= k:\n k += 1\n return k - 1\n\n\ntestcase = sys.stdin.readline()\nfor i in range(int(testcase)):\n sys.stdin.readline()\n line1 = sys.stdin.readline().rstrip('\\n')\n line2 = sys.stdin.readline().rstrip('\\n')\n ans = solution([int(x) for x in line1.split(' ')], [int(x) for x in\n line2.split(' ')])\n print('Case #{}: {}'.format(i + 1, ans))\n",
"step-4": "import sys\n\n\ndef solution(input):\n k = 1\n for v in sorted(input):\n if v >= k:\n k += 1\n return k - 1\n\n\ntestcase = sys.stdin.readline()\nfor i in range(int(testcase)):\n sys.stdin.readline()\n line1 = sys.stdin.readline().rstrip('\\n')\n line2 = sys.stdin.readline().rstrip('\\n')\n ans = solution([int(x) for x in line1.split(' ')], [int(x) for x in\n line2.split(' ')])\n print('Case #{}: {}'.format(i + 1, ans))\n",
"step-5": "import sys\n\ndef solution(input):\n k = 1\n for v in sorted(input):\n if v >= k:\n k += 1\n return k - 1\n\ntestcase = sys.stdin.readline()\nfor i in range(int(testcase)):\n sys.stdin.readline()\n line1 = sys.stdin.readline().rstrip('\\n')\n line2 = sys.stdin.readline().rstrip('\\n')\n ans = solution(\n [ int(x) for x in line1.split(' ') ],\n [ int(x) for x in line2.split(' ') ],\n )\n print(\"Case #{}: {}\".format(i+1, ans))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_app():
app = Flask(__name__)
app.config['SECRET_KEY'] = 'KARNISINGHSHEKHAWAT'
app.config['SQLALCHEMY_DATABASE_URL'] = f'sqlite:///{DBNAME}'
db.init_app(app)
from .views import views
from .auth import auth
app.register_blueprint(views, urlprefix='/')
app.register_blueprint(auth, urlprefix='/')
return app
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db = SQLAlchemy()
DBNAME = 'database.db'
def create_app():
    """Application factory: build and configure the Flask app.

    :return: a configured Flask instance with the SQLAlchemy extension
        bound and the ``views``/``auth`` blueprints registered at ``/``.
    """
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'KARNISINGHSHEKHAWAT'
    # Flask-SQLAlchemy reads SQLALCHEMY_DATABASE_URI; the original
    # "..._URL" key was silently ignored, so the DB path never applied.
    app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{DBNAME}'
    db.init_app(app)
    # Imported here to avoid a circular import at module load time.
    from .views import views
    from .auth import auth
    # register_blueprint expects "url_prefix"; the misspelled "urlprefix"
    # keyword was silently dropped by Flask's **options handling.
    app.register_blueprint(views, url_prefix='/')
    app.register_blueprint(auth, url_prefix='/')
    return app
<|reserved_special_token_1|>
from flask import Flask, app
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
DBNAME = 'database.db'
def create_app():
    """Application factory: build and configure the Flask app.

    :return: a configured Flask instance with the SQLAlchemy extension
        bound and the ``views``/``auth`` blueprints registered at ``/``.
    """
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'KARNISINGHSHEKHAWAT'
    # Flask-SQLAlchemy reads SQLALCHEMY_DATABASE_URI; the original
    # "..._URL" key was silently ignored, so the DB path never applied.
    app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{DBNAME}'
    db.init_app(app)
    # Imported here to avoid a circular import at module load time.
    from .views import views
    from .auth import auth
    # register_blueprint expects "url_prefix"; the misspelled "urlprefix"
    # keyword was silently dropped by Flask's **options handling.
    app.register_blueprint(views, url_prefix='/')
    app.register_blueprint(auth, url_prefix='/')
    return app
|
flexible
|
{
"blob_id": "c6fdb9c405427a3583a59065f77c75c4aa781405",
"index": 5417,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_app():\n app = Flask(__name__)\n app.config['SECRET_KEY'] = 'KARNISINGHSHEKHAWAT'\n app.config['SQLALCHEMY_DATABASE_URL'] = f'sqlite:///{DBNAME}'\n db.init_app(app)\n from .views import views\n from .auth import auth\n app.register_blueprint(views, urlprefix='/')\n app.register_blueprint(auth, urlprefix='/')\n return app\n",
"step-3": "<mask token>\ndb = SQLAlchemy()\nDBNAME = 'database.db'\n\n\ndef create_app():\n app = Flask(__name__)\n app.config['SECRET_KEY'] = 'KARNISINGHSHEKHAWAT'\n app.config['SQLALCHEMY_DATABASE_URL'] = f'sqlite:///{DBNAME}'\n db.init_app(app)\n from .views import views\n from .auth import auth\n app.register_blueprint(views, urlprefix='/')\n app.register_blueprint(auth, urlprefix='/')\n return app\n",
"step-4": "from flask import Flask, app\nfrom flask_sqlalchemy import SQLAlchemy\ndb = SQLAlchemy()\nDBNAME = 'database.db'\n\n\ndef create_app():\n app = Flask(__name__)\n app.config['SECRET_KEY'] = 'KARNISINGHSHEKHAWAT'\n app.config['SQLALCHEMY_DATABASE_URL'] = f'sqlite:///{DBNAME}'\n db.init_app(app)\n from .views import views\n from .auth import auth\n app.register_blueprint(views, urlprefix='/')\n app.register_blueprint(auth, urlprefix='/')\n return app\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import json
import unittest
from music_focus.workflows.weibo_online import WeiboOnline
class Test(unittest.TestCase):
    """Smoke test for the WeiboOnline workflow."""

    def setUp(self):
        # No shared fixtures needed for this workflow run.
        pass

    def test(self):
        # 'result_type' selects what the workflow returns; here raw posts.
        workflow_input = {'result_type': 'posts'}
        wf = WeiboOnline()
        r = wf.run(workflow_input)
        # NOTE(review): there are no assertions — this only verifies that
        # run() does not raise; the result is dumped for manual inspection.
        print(json.dumps(r, ensure_ascii=False, indent=2))

    def tearDown(self):
        # Nothing to clean up.
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
normal
|
{
"blob_id": "7088f7233b67dcb855482a76d304aacc1a26abad",
"index": 3790,
"step-1": "<mask token>\n\n\nclass Test(unittest.TestCase):\n <mask token>\n\n def test(self):\n workflow_input = {'result_type': 'posts'}\n wf = WeiboOnline()\n r = wf.run(workflow_input)\n print(json.dumps(r, ensure_ascii=False, indent=2))\n\n def tearDown(self):\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test(self):\n workflow_input = {'result_type': 'posts'}\n wf = WeiboOnline()\n r = wf.run(workflow_input)\n print(json.dumps(r, ensure_ascii=False, indent=2))\n\n def tearDown(self):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test(self):\n workflow_input = {'result_type': 'posts'}\n wf = WeiboOnline()\n r = wf.run(workflow_input)\n print(json.dumps(r, ensure_ascii=False, indent=2))\n\n def tearDown(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import json\nimport unittest\nfrom music_focus.workflows.weibo_online import WeiboOnline\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test(self):\n workflow_input = {'result_type': 'posts'}\n wf = WeiboOnline()\n r = wf.run(workflow_input)\n print(json.dumps(r, ensure_ascii=False, indent=2))\n\n def tearDown(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class DailyCacheMiddleware(CacheMiddleware):
<|reserved_special_token_0|>
@property
def key_prefix(self):
return date.today().isoformat() + '/' + (self.__key_prefix or '')
@key_prefix.setter
def key_prefix(self, value):
self.__key_prefix = value
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DailyCacheMiddleware(CacheMiddleware):
"""Like the cache middleware, but always expires at midnight"""
@property
def key_prefix(self):
return date.today().isoformat() + '/' + (self.__key_prefix or '')
@key_prefix.setter
def key_prefix(self, value):
self.__key_prefix = value
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
lt_cache = cache_page(settings.CACHES['eregs_longterm_cache']['TIMEOUT'],
cache='eregs_longterm_cache')
class DailyCacheMiddleware(CacheMiddleware):
"""Like the cache middleware, but always expires at midnight"""
@property
def key_prefix(self):
return date.today().isoformat() + '/' + (self.__key_prefix or '')
@key_prefix.setter
def key_prefix(self, value):
self.__key_prefix = value
daily_cache = decorator_from_middleware_with_args(DailyCacheMiddleware)(
cache_timeout=settings.CACHES['eregs_longterm_cache']['TIMEOUT'],
cache_alias='eregs_longterm_cache')
<|reserved_special_token_1|>
from datetime import date
from django.conf import settings
from django.utils.decorators import decorator_from_middleware_with_args
from django.views.decorators.cache import cache_page
from django.middleware.cache import CacheMiddleware
lt_cache = cache_page(settings.CACHES['eregs_longterm_cache']['TIMEOUT'],
cache='eregs_longterm_cache')
class DailyCacheMiddleware(CacheMiddleware):
"""Like the cache middleware, but always expires at midnight"""
@property
def key_prefix(self):
return date.today().isoformat() + '/' + (self.__key_prefix or '')
@key_prefix.setter
def key_prefix(self, value):
self.__key_prefix = value
daily_cache = decorator_from_middleware_with_args(DailyCacheMiddleware)(
cache_timeout=settings.CACHES['eregs_longterm_cache']['TIMEOUT'],
cache_alias='eregs_longterm_cache')
|
flexible
|
{
"blob_id": "5b440484c5d7f066c54837c2812967a0ff360399",
"index": 9905,
"step-1": "<mask token>\n\n\nclass DailyCacheMiddleware(CacheMiddleware):\n <mask token>\n\n @property\n def key_prefix(self):\n return date.today().isoformat() + '/' + (self.__key_prefix or '')\n\n @key_prefix.setter\n def key_prefix(self, value):\n self.__key_prefix = value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DailyCacheMiddleware(CacheMiddleware):\n \"\"\"Like the cache middleware, but always expires at midnight\"\"\"\n\n @property\n def key_prefix(self):\n return date.today().isoformat() + '/' + (self.__key_prefix or '')\n\n @key_prefix.setter\n def key_prefix(self, value):\n self.__key_prefix = value\n\n\n<mask token>\n",
"step-3": "<mask token>\nlt_cache = cache_page(settings.CACHES['eregs_longterm_cache']['TIMEOUT'],\n cache='eregs_longterm_cache')\n\n\nclass DailyCacheMiddleware(CacheMiddleware):\n \"\"\"Like the cache middleware, but always expires at midnight\"\"\"\n\n @property\n def key_prefix(self):\n return date.today().isoformat() + '/' + (self.__key_prefix or '')\n\n @key_prefix.setter\n def key_prefix(self, value):\n self.__key_prefix = value\n\n\ndaily_cache = decorator_from_middleware_with_args(DailyCacheMiddleware)(\n cache_timeout=settings.CACHES['eregs_longterm_cache']['TIMEOUT'],\n cache_alias='eregs_longterm_cache')\n",
"step-4": "from datetime import date\nfrom django.conf import settings\nfrom django.utils.decorators import decorator_from_middleware_with_args\nfrom django.views.decorators.cache import cache_page\nfrom django.middleware.cache import CacheMiddleware\nlt_cache = cache_page(settings.CACHES['eregs_longterm_cache']['TIMEOUT'],\n cache='eregs_longterm_cache')\n\n\nclass DailyCacheMiddleware(CacheMiddleware):\n \"\"\"Like the cache middleware, but always expires at midnight\"\"\"\n\n @property\n def key_prefix(self):\n return date.today().isoformat() + '/' + (self.__key_prefix or '')\n\n @key_prefix.setter\n def key_prefix(self, value):\n self.__key_prefix = value\n\n\ndaily_cache = decorator_from_middleware_with_args(DailyCacheMiddleware)(\n cache_timeout=settings.CACHES['eregs_longterm_cache']['TIMEOUT'],\n cache_alias='eregs_longterm_cache')\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
from django.db import models
from datetime import datetime
# Message model for testing purposes
class Message(models.Model):
    """A simple type/body message, used for testing."""

    type = models.CharField(max_length=10)
    body = models.CharField(max_length=50)

    def __str__(self):
        # Rendered as "<type>:<body>".
        return ":".join([self.type, self.body])
# Company model
class Company(models.Model):
    """A company whose messages are tracked."""

    name = models.CharField(max_length=10)

    @classmethod
    def create(cls, name):
        # Alternate constructor; note the instance is NOT saved here.
        return cls(name=name)

    def __str__(self):
        return self.name
# model for storing message and its prediction
class Entry(models.Model):
    """A fetched message together with its model prediction."""

    fetched_date = models.DateTimeField()
    message = models.CharField(max_length=200)
    prediction = models.CharField(max_length=10)
    parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)

    @classmethod
    def create(cls, message, prediction, company):
        # Alternate constructor stamping the fetch time; instance is NOT saved.
        entry = cls(message=message, prediction=prediction, parent_company=company)
        entry.fetched_date = datetime.now()
        return entry

    def __str__(self):
        stamp = self.fetched_date.strftime("%m/%d/%Y, %H:%M:%S")
        return stamp + " " + self.prediction + ":" + self.message
|
normal
|
{
"blob_id": "47f6c4b3c279a065b8f21dab2faa71271db8d6ab",
"index": 6680,
"step-1": "<mask token>\n\n\nclass Company(models.Model):\n <mask token>\n\n @classmethod\n def create(cls, name):\n company = cls(name=name)\n return company\n\n def __str__(self):\n return self.name\n\n\nclass Entry(models.Model):\n fetched_date = models.DateTimeField()\n message = models.CharField(max_length=200)\n prediction = models.CharField(max_length=10)\n parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)\n\n @classmethod\n def create(cls, message, prediction, company):\n entry = cls(message=message, prediction=prediction, parent_company=\n company)\n entry.fetched_date = datetime.now()\n return entry\n\n def __str__(self):\n return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'\n ) + ' ' + self.prediction + ':' + self.message\n",
"step-2": "<mask token>\n\n\nclass Company(models.Model):\n name = models.CharField(max_length=10)\n\n @classmethod\n def create(cls, name):\n company = cls(name=name)\n return company\n\n def __str__(self):\n return self.name\n\n\nclass Entry(models.Model):\n fetched_date = models.DateTimeField()\n message = models.CharField(max_length=200)\n prediction = models.CharField(max_length=10)\n parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)\n\n @classmethod\n def create(cls, message, prediction, company):\n entry = cls(message=message, prediction=prediction, parent_company=\n company)\n entry.fetched_date = datetime.now()\n return entry\n\n def __str__(self):\n return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'\n ) + ' ' + self.prediction + ':' + self.message\n",
"step-3": "<mask token>\n\n\nclass Message(models.Model):\n type = models.CharField(max_length=10)\n body = models.CharField(max_length=50)\n\n def __str__(self):\n return self.type + ':' + self.body\n\n\nclass Company(models.Model):\n name = models.CharField(max_length=10)\n\n @classmethod\n def create(cls, name):\n company = cls(name=name)\n return company\n\n def __str__(self):\n return self.name\n\n\nclass Entry(models.Model):\n fetched_date = models.DateTimeField()\n message = models.CharField(max_length=200)\n prediction = models.CharField(max_length=10)\n parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)\n\n @classmethod\n def create(cls, message, prediction, company):\n entry = cls(message=message, prediction=prediction, parent_company=\n company)\n entry.fetched_date = datetime.now()\n return entry\n\n def __str__(self):\n return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'\n ) + ' ' + self.prediction + ':' + self.message\n",
"step-4": "from django.db import models\nfrom datetime import datetime\n\n\nclass Message(models.Model):\n type = models.CharField(max_length=10)\n body = models.CharField(max_length=50)\n\n def __str__(self):\n return self.type + ':' + self.body\n\n\nclass Company(models.Model):\n name = models.CharField(max_length=10)\n\n @classmethod\n def create(cls, name):\n company = cls(name=name)\n return company\n\n def __str__(self):\n return self.name\n\n\nclass Entry(models.Model):\n fetched_date = models.DateTimeField()\n message = models.CharField(max_length=200)\n prediction = models.CharField(max_length=10)\n parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)\n\n @classmethod\n def create(cls, message, prediction, company):\n entry = cls(message=message, prediction=prediction, parent_company=\n company)\n entry.fetched_date = datetime.now()\n return entry\n\n def __str__(self):\n return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'\n ) + ' ' + self.prediction + ':' + self.message\n",
"step-5": "from django.db import models\r\nfrom datetime import datetime\r\n\r\n\r\n# Message model for testing purposes\r\nclass Message(models.Model):\r\n type = models.CharField(max_length=10)\r\n body = models.CharField(max_length=50)\r\n\r\n def __str__(self):\r\n return self.type + \":\" + self.body\r\n\r\n\r\n# Company model\r\nclass Company(models.Model):\r\n name = models.CharField(max_length=10)\r\n\r\n @classmethod\r\n def create(cls, name):\r\n company = cls(name=name)\r\n return company\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n\r\n# model for storing message and its prediction\r\nclass Entry(models.Model):\r\n fetched_date = models.DateTimeField()\r\n message = models.CharField(max_length=200)\r\n prediction = models.CharField(max_length=10)\r\n parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)\r\n\r\n @classmethod\r\n def create(cls, message, prediction, company):\r\n entry = cls(message=message, prediction=prediction, parent_company=company)\r\n entry.fetched_date = datetime.now()\r\n return entry\r\n\r\n def __str__(self):\r\n return self.fetched_date.strftime(\"%m/%d/%Y, %H:%M:%S\") + \" \" + self.prediction + \":\" + self.message\r\n",
"step-ids": [
7,
8,
11,
12,
13
]
}
|
[
7,
8,
11,
12,
13
] |
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2017, 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Pytest configuration for REANA-Workflow-Controller."""
from __future__ import absolute_import, print_function
import os
import shutil
import pytest
from reana_db.models import Base, User
from sqlalchemy_utils import create_database, database_exists, drop_database
from reana_workflow_controller.factory import create_app
@pytest.fixture(scope="module")
def base_app(tmp_shared_volume_path):
    """Flask application fixture."""
    # Keyword-style construction of the exact same config mapping.
    config_mapping = dict(
        SERVER_NAME="localhost:5000",
        SECRET_KEY="SECRET_KEY",
        TESTING=True,
        SHARED_VOLUME_PATH=tmp_shared_volume_path,
        SQLALCHEMY_DATABASE_URI="sqlite:///testdb.db",
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
        ORGANIZATIONS=["default"],
    )
    return create_app(config_mapping)
|
normal
|
{
"blob_id": "502e92d3e5d059d73016702ce0b2591a123810d3",
"index": 6892,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.fixture(scope='module')\ndef base_app(tmp_shared_volume_path):\n \"\"\"Flask application fixture.\"\"\"\n config_mapping = {'SERVER_NAME': 'localhost:5000', 'SECRET_KEY':\n 'SECRET_KEY', 'TESTING': True, 'SHARED_VOLUME_PATH':\n tmp_shared_volume_path, 'SQLALCHEMY_DATABASE_URI':\n 'sqlite:///testdb.db', 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n 'ORGANIZATIONS': ['default']}\n app_ = create_app(config_mapping)\n return app_\n",
"step-3": "<mask token>\nfrom __future__ import absolute_import, print_function\nimport os\nimport shutil\nimport pytest\nfrom reana_db.models import Base, User\nfrom sqlalchemy_utils import create_database, database_exists, drop_database\nfrom reana_workflow_controller.factory import create_app\n\n\n@pytest.fixture(scope='module')\ndef base_app(tmp_shared_volume_path):\n \"\"\"Flask application fixture.\"\"\"\n config_mapping = {'SERVER_NAME': 'localhost:5000', 'SECRET_KEY':\n 'SECRET_KEY', 'TESTING': True, 'SHARED_VOLUME_PATH':\n tmp_shared_volume_path, 'SQLALCHEMY_DATABASE_URI':\n 'sqlite:///testdb.db', 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n 'ORGANIZATIONS': ['default']}\n app_ = create_app(config_mapping)\n return app_\n",
"step-4": "# -*- coding: utf-8 -*-\n#\n# This file is part of REANA.\n# Copyright (C) 2017, 2018 CERN.\n#\n# REANA is free software; you can redistribute it and/or modify it\n# under the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"Pytest configuration for REANA-Workflow-Controller.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport shutil\n\nimport pytest\nfrom reana_db.models import Base, User\nfrom sqlalchemy_utils import create_database, database_exists, drop_database\n\nfrom reana_workflow_controller.factory import create_app\n\n\n@pytest.fixture(scope=\"module\")\ndef base_app(tmp_shared_volume_path):\n \"\"\"Flask application fixture.\"\"\"\n config_mapping = {\n \"SERVER_NAME\": \"localhost:5000\",\n \"SECRET_KEY\": \"SECRET_KEY\",\n \"TESTING\": True,\n \"SHARED_VOLUME_PATH\": tmp_shared_volume_path,\n \"SQLALCHEMY_DATABASE_URI\": \"sqlite:///testdb.db\",\n \"SQLALCHEMY_TRACK_MODIFICATIONS\": False,\n \"ORGANIZATIONS\": [\"default\"],\n }\n app_ = create_app(config_mapping)\n return app_\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib as mpl
from matplotlib import pyplot as plt
plt.style.use('seaborn-muted')
#from IPython import get_ipython
from IPython.display import HTML, Markdown
import air_cargo_problems as acp
# Display names of the four air-cargo planning problems (index i -> problem i+1).
problems = ['Air Cargo Problem 1',
            'Air Cargo Problem 2',
            'Air Cargo Problem 3',
            'Air Cargo Problem 4']
# Canonical ordering of the 11 search runs; the 1-based position in this list
# is used as the dataframe index throughout this module.
SEARCHES = ['breadth_first_search',
            'depth_first_graph_search',
            'uniform_cost_search',
            'greedy_best_first_graph_search h_unmet_goals',
            'greedy_best_first_graph_search h_pg_levelsum',
            'greedy_best_first_graph_search h_pg_maxlevel',
            'greedy_best_first_graph_search h_pg_setlevel',
            'astar_search h_unmet_goals',
            'astar_search h_pg_levelsum',
            'astar_search h_pg_maxlevel',
            'astar_search h_pg_setlevel']
def get_prob_specs():
    """Return a dataframe with one row per problem: its name, number, and
    the sizes of its cargos/planes/airports/goal sets."""
    instances = [acp.air_cargo_p1(), acp.air_cargo_p2(),
                 acp.air_cargo_p3(), acp.air_cargo_p4()]
    spec_columns = {'Problem': list(problems),
                    'Air cargo problem': list(range(1, len(problems) + 1)),
                    'Cargos': [len(p.cargos) for p in instances],
                    'Planes': [len(p.planes) for p in instances],
                    'Airports': [len(p.airports) for p in instances],
                    'Goal': [len(p.goal) for p in instances]}
    return pd.DataFrame(spec_columns)
# Problem-size table, computed once at import time and merged into result frames.
specs = get_prob_specs()
def df2tsv(df, fname, replace=False):
    """Write *df* to *fname* as a tab-separated CSV.

    If the file already exists it is only overwritten when *replace* is True.
    Returns None in all cases.
    """
    if Path(fname).exists() and not replace:
        # Keep the existing file untouched.
        return
    df.to_csv(fname, sep='\t')
    return
def get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=False, replace=False):
    """
    Combine all raw files of one problem found in raw_dir with the given stem
    into a single dataframe indexed by the 1-based SEARCHES position.

    If file_as_tsv is True, the result is cached in out_dir as
    file_stem + '_df.csv' (tab-separated) and retrieved from there on later
    calls unless replace is True.

    Input example:
        file_stem = 'prob_2'
        problem = 'Air Cargo Problem 2'
    Output: a dataframe (or None when inputs are missing/invalid).
    """
    if file_stem is None or problem is None:
        print('file_stem and problem must have a value.')
        return
    t = '\t'
    # input/output file suffixes: raw input vs cached output.
    sfx = ['.csv', '_df.csv']
    # Try retrieving the cached frame from out_dir if not replacing it:
    fout = None
    if file_as_tsv:
        fout = Path(out_dir).joinpath(file_stem + sfx[1])
        if fout.exists() and not replace:
            df = pd.read_csv(fout, sep=t)
            try:
                # Drop the index column that to_csv wrote out.
                return df.drop('Unnamed: 0', axis=1)
            except KeyError:
                # NOTE(review): when the cached file lacks 'Unnamed: 0' the
                # cache is silently ignored and the raw files are reprocessed.
                pass
    # else: (re)process every raw file matching the stem.
    pfiles = list(Path(raw_dir).glob(file_stem + '*'))
    if len(pfiles) == 0:
        print(f'No raw files with stem: {file_stem}')
        return
    dflist = []
    for f in pfiles:
        df, err = get_results_df(f, problem)
        if df is not None:
            # Attach problem-size columns and index rows by searcher rank.
            df = df.merge(specs)
            df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x)+1)
            df['index'] = df['index'].astype(int)
            df.set_index('index', drop=True, inplace=True)
            dflist.append(df)
            del df
        else:
            print(f'Error from get_results_df:\n\t{err}')
    # NOTE(review): pd.concat raises if every file failed (dflist empty).
    dfout = pd.concat(dflist, ignore_index=False)
    dfout.sort_index(inplace=True)
    if file_as_tsv:
        df2tsv(dfout, fout, replace=replace)
    return dfout
def get_results_df(fname, problem):
    """Parse one raw run file (tab-separated) into a tidy dataframe.

    The raw file alternates a 'Searcher' marker row with six value rows
    (Actions, Expansions, GoalTests, NewNodes, PlanLength, ElapsedSeconds).
    Returns (dataframe, '') on success, or (None, error_message) when the
    file has fewer rows than one full metric set.
    """
    metric_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',
                   'PlanLength', 'ElapsedSeconds']
    frame = pd.read_csv(fname, sep='\t')
    if frame.shape[0] < len(metric_cols):
        err = f'Data for {fname.name} is incomplete.'
        return None, err
    # Temporary names: 'c' holds the row label, 'Searcher' the value.
    frame.columns = ['c', 'Searcher']
    frame = frame.reindex(columns=frame.columns.tolist() + metric_cols)
    # Each marker row is followed by one value row per metric, in order.
    marker_rows = frame.loc[frame.c == 'Searcher', 'Searcher']
    for idx in marker_rows.index:
        for offset, col in enumerate(frame.columns[2:].tolist(), start=1):
            cell = frame.loc[idx + offset, 'Searcher']
            frame.loc[idx, col] = float(cell) if col == 'ElapsedSeconds' else int(cell)
    # Value rows still have NaN metric columns and are dropped here.
    frame.dropna(inplace=True)
    frame['Minutes'] = np.round(frame.ElapsedSeconds / 60, 3)
    # The label column now carries the problem name.
    frame['c'] = problem
    frame.rename(columns={'c': 'Problem'}, inplace=True)
    frame.reset_index(drop=True, inplace=True)
    return frame, ''
def concat_all_dfs(dflist):
    """
    Output combined df for complete runs, Actions>0.

    dflist: per-problem frames as built by get_problem_data_df (indexed by
    searcher rank). Returns a reduced frame with a 'search_fn' column and
    only the rows whose search actually completed (Actions > 0).
    """
    dfall = pd.concat(dflist, ignore_index=False)
    dfall.reset_index(drop=False, inplace=True)
    dfall.rename(columns={'index': 'id'}, inplace=True)
    # reduced: drop the last problem-spec columns plus bookkeeping columns.
    # NOTE(review): columns[-4:-1] is positional — it assumes the merged spec
    # columns sit at the end of the frame; a column reorder upstream breaks it.
    drop_cols = dfall.columns[-4:-1].tolist() + ['Problem','Minutes','GoalTests']
    dfa = dfall.drop(drop_cols, axis=1)
    del dfall
    # add col for function name (text before the first space, i.e. without
    # the heuristic suffix).
    dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]
    # reorder cols
    dfa = dfa[['Air cargo problem','id','search_fn','Searcher','Actions',
               'PlanLength', 'NewNodes','Expansions','ElapsedSeconds']]
    # complete runs only:
    return dfa[dfa['Actions'].values > 0]
def plans_length(dfa, which):
    """
    dfa: frame of concatenated df1 to df4.
    Analysis of plan length for which in ['double', 'single']:
    PlanLength is double(single)-digit.

    Returns (html table of per-function frequencies, summary paragraph,
    the filtered/sorted subset of dfa).
    """
    if which == 'double':
        msk = dfa.PlanLength >= 10
        col2 = 'Frequency where PlanLength >=10'
    else:
        msk = dfa.PlanLength < 10
        col2 = 'Frequency where PlanLength <10'
    dfa_rows = dfa.shape[0]
    dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)
    uniq_probs = dfout['Air cargo problem'].unique()
    n_plans = dfout.shape[0]
    # NOTE(review): searcher_cnt is computed but never used below.
    searcher_cnt = dfout['Searcher'].value_counts()
    fn_cnt = dfout['search_fn'].value_counts()
    # get the html string:
    df_fn = fn_cnt.to_frame()
    df_fn.reset_index(drop=False, inplace=True)
    df_fn.columns = ['Search function', col2]
    df_fn_html = df_fn.to_html(index=False, justify='center')
    # Move the centering style from the header onto the table element itself.
    replace_str1 = ' style="text-align: center;"'
    replace_str2 = 'class="dataframe"'
    df_fn_html = df_fn_html.replace(replace_str1, '')
    df_fn_html = df_fn_html.replace(replace_str2, replace_str1)
    pct_plans = n_plans/dfa_rows
    # Count for the two most frequent search functions in the subset.
    # NOTE(review): divides by n_plans — raises ZeroDivisionError when the
    # mask selects no rows.
    top2_fn = fn_cnt[0:2].sum()
    pct_top2_fn = top2_fn/n_plans
    text = f"Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>"
    text += f"In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`."
    if len(uniq_probs) < 4:
        text += " And this occurs only for Problems: "
        pro = ",".join('{}' for p in uniq_probs) +'.<br>'
        text += pro.format(*uniq_probs)
    else:
        text += " And this occurs for all Problems."
        text += "<br>"
    return df_fn_html, text, dfout
def make_bar_plots(df_list,
                   x_col, y_col,
                   problems,
                   legend_bbox=(.05, .95),
                   to_file='',
                   show=False,
                   excluded=None):
    """
    To get 2 bar plots in a row.

    df_list: two dataframes, one per problem of the pair; each must hold a
        single positive value in x_col.
    x_col, y_col: column plotted on the x-label / as bar heights. When y_col
        is 'ElapsedSeconds' and the pair is problems 3/4, 'Minutes' is used.
    problems: two display names for the x-axis labels.
    legend_bbox: bbox_to_anchor for the legend on the right-hand axes.
    to_file: if non-empty, the figure is saved to this path.
    show: if True, the axes array is returned.
    excluded: optional list of (1-based index, name) searches with no data,
        marked with ' X' in the legend (problems 3/4 only).
    """
    import matplotlib.patches as mpatches

    def despine(ax):
        # Hide the top/right frame lines.
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
    a1 = df_list[0][x_col].unique().astype(int)
    a1 = a1[a1>0]
    a2 = df_list[1][x_col].unique().astype(int)
    a2 = a2[a2>0]
    # Each problem frame must contribute exactly one positive x value.
    assert len(a1) == len(a2) == 1
    action_nums = [a1[0], a2[0]]
    p1 = df_list[0]['Air cargo problem'].iloc[0]
    p2 = df_list[1]['Air cargo problem'].iloc[0]
    # Search function names should be common to all dfs:
    search = df_list[0].Searcher.tolist()
    # Sample cmap according to categories:
    s_len = len(search)
    cmap = plt.get_cmap('viridis')
    m = cmap.N // s_len
    colors = [cmap.colors[i*m] for i in range(s_len)]
    fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12,5))
    # Use the minutes columns for the more complex problems:
    if y_col == 'ElapsedSeconds':
        ty_col = 'Elapsed time'
        # Bug fix: the original condition was `p1 == 3 or p == 4`, but `p`
        # is undefined -> NameError whenever p1 != 3. Problems come in the
        # pairs (1,2) and (3,4), so test p1 against both hard problems.
        if p1 in (3, 4):  # applies to problems 3/4
            y_col = 'Minutes'
    else:
        ty_col = y_col
    plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}',
              y = 1.05, fontsize=14)
    for i, df in enumerate(df_list):
        ylog = False
        ylab = f'{y_col}'
        # log scale on NewNodes for df2, df3, df4:
        if (i == 1 or p1 == 3) and y_col == 'NewNodes':
            ylog = True
            ylab += ' (log)'
        axs[i].set_ylabel(ylab, fontsize=12)
        df[y_col].plot.bar(ax=axs[i], logy=ylog,
                           color=colors,
                           legend=False)
        t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])
        axs[i].set_xlabel(t, fontsize=12)
        axs[i].set_xticks([])
        despine(axs[i])
    legt = 'Searchers'
    new_lgd = p1 == 3 and excluded is not None
    if new_lgd:
        # Modify the legend to indicate excluded searches
        # (bc colormap is identical to fig1/2, but some runs have no data).
        legt += ' (X :: excluded)'
        excluded_len = len(excluded)
        x_idx = [excluded[i][0]-1 for i in range(excluded_len)]
    legend_patches = []
    for i, c in enumerate(colors):
        lab = search[i]
        if new_lgd:
            if SEARCHES.index(lab) in x_idx:
                lab = lab.replace(' ', ' + ')
                lab += ' X'
            else:
                lab = lab.replace(' ', ' + ')
        else:
            lab = lab.replace(' ', ' + ')
        legend_patches.append(mpatches.Patch(color=c, label=lab))
    axs[1].legend(handles=legend_patches,
                  title=legt,
                  title_fontsize='14',
                  fontsize='medium',
                  bbox_to_anchor=legend_bbox,
                  loc='upper left',
                  labelspacing=0.6,
                  fancybox=True)
    plt.tight_layout()
    if to_file:
        plt.savefig(to_file)
    if show:
        return axs
def format_multiples(multi):
    """Render a pandas Series of strings as '[a, b, c]'."""
    # One positional '{k:s}' placeholder per element, comma-joined.
    placeholders = ', '.join('{' + str(k) + ':s}' for k in range(len(multi)))
    return '[' + placeholders.format(*multi.values) + ']'
def order_analysis(df2, df1, column_to_compare):
    """
    df2: has the large values.

    Returns (average rounded log-ratio, Searcher names of the rows whose
    log-ratio exceeds that average).
    """
    colA_larger_values = df2[column_to_compare]
    colA_smaller_values = df1[column_to_compare]
    # orders of magnitude difference btw dfB and dfA (min, max):
    # NOTE(review): np.log is the NATURAL log; for true base-10 "orders of
    # magnitude" this would be np.log10 — confirm the intended base before
    # changing, since the downstream prose quotes these numbers.
    mag = np.round(np.log(colA_larger_values/colA_smaller_values), 0)
    mag.sort_values(ascending=False, inplace=True)
    mag_aver = int(np.round(mag.mean(), 0))
    # get the indices of values above average:
    ma = mag[mag > mag_aver].index.tolist()
    # get the names of all searchers corresponding to the ma:
    above_multiples = (mag_aver, df2.loc[ma, 'Searcher'])
    return above_multiples
def comparison_paragraph(df2, df1, heading, column_to_compare, return_html=False):
    """Build an HTML paragraph comparing *column_to_compare* across the two
    problem frames; return raw HTML or an IPython Markdown object."""
    # Last character of the problem name is its number.
    prob_small = df1.loc[0,'Problem'][-1]
    prob_large = df2.loc[0,'Problem'][-1]
    order_aver, searches_above = order_analysis(df2, df1, column_to_compare)
    above = format_multiples(searches_above)
    headinglc = heading.lower()
    text = (
        f"""<h3>* {heading}</h3><p style="font-size:110%;">For Problems {prob_small} and {prob_large}, """
        + f"the <i>average</i> order of magnitude difference in {headinglc} is "
        + f"<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>"
    )
    return text if return_html else Markdown(text)
def get_elim_candidates(df2, df1):
    """
    For the analysis of problems 1 & 2.
    List the costliest searches: candidates for elimination on more complex problems.

    Returns a list of (1-based SEARCHES index, searcher name) tuples, or None
    when df1 is not the Problem 1 frame.
    """
    # Guard: this analysis is only defined for the (Problem 1, Problem 2) pair.
    if df1.loc[1,'Problem']!= problems[0]:
        return
    nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')
    time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')
    # Intersect the head of each above-average list, taking as many entries
    # as the average order value itself.
    # NOTE(review): using the average order as a slice LENGTH is unusual —
    # confirm this coupling of magnitude and count is intentional.
    elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(time_above[:time_order_av]))
    # return their 1-base index also:
    out = [(SEARCHES.index(c)+1, c) for c in elim_candidates]
    return out
def paragraph_p12(candidates_tup, return_html=False):
    """
    For displaying the analysis of problems 1 & 2.

    candidates_tup: (index, searcher name) pairs from get_elim_candidates.
    Returns raw HTML when return_html, else an IPython Markdown object.
    """
    # One <dt> entry per elimination candidate, index right-aligned to 2 chars.
    elim_list = "".join(f"<dt><b>{i:>2}: {c}</b></dt>" for i, c in candidates_tup)
    text = """<h3>* Insights from Problems 1 and 2</h3><p style="font-size:110%;">"""
    text += """On the basis of Figures 1 and 2, which show the number of new nodes created, 
    and the time spent by each search function, respectively, the searches that are candidates 
    for elimination for more complex problems are those at the intersection of the average-ranked 
    costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>"""
    text += f"<dl>{elim_list}</dl></p></pre>"
    if return_html:
        return text
    return Markdown(text)
def add_div_around_html(div_html_text, output_string=False, div_style="{width: 80%}"):
    """
    Wrap an html code str inside a div.
    div_style: whatever follows style= within the <div>

    With output_string=True the raw div string is returned (useful when the
    notebook cell is to be converted to Markdown by hand); otherwise an
    IPython Markdown object is returned for display in an output cell.
    """
    wrapped = f"""<div style="{div_style}">{div_html_text}</div>"""
    if output_string:
        return wrapped
    return Markdown(wrapped)
|
normal
|
{
"blob_id": "cd49230be3c418853aa2986ed727204e51a6b6ae",
"index": 3794,
"step-1": "<mask token>\n\n\ndef get_results_df(fname, problem):\n \"\"\"Process csv into dataframe.\n \"\"\"\n t = '\\t'\n val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',\n 'PlanLength', 'ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n df.columns = ['c', 'Searcher']\n df = df.reindex(columns=df.columns.tolist() + val_cols)\n sr = df.loc[df.c == 'Searcher', 'Searcher']\n for idx, sr_row in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 'Searcher'])\n df.dropna(inplace=True)\n df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n return df, ''\n\n\n<mask token>\n\n\ndef plans_length(dfa, which):\n \"\"\"\n dfa: frame of concatenated df1 to df4.\n Analysis of plan length for which in ['double', 'single']:\n PlanLength is double(single)-digit.\n \"\"\"\n if which == 'double':\n msk = dfa.PlanLength >= 10\n col2 = 'Frequency where PlanLength >=10'\n else:\n msk = dfa.PlanLength < 10\n col2 = 'Frequency where PlanLength <10'\n dfa_rows = dfa.shape[0]\n dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)\n uniq_probs = dfout['Air cargo problem'].unique()\n n_plans = dfout.shape[0]\n searcher_cnt = dfout['Searcher'].value_counts()\n fn_cnt = dfout['search_fn'].value_counts()\n df_fn = fn_cnt.to_frame()\n df_fn.reset_index(drop=False, inplace=True)\n df_fn.columns = ['Search function', col2]\n df_fn_html = df_fn.to_html(index=False, justify='center')\n replace_str1 = ' style=\"text-align: center;\"'\n replace_str2 = 'class=\"dataframe\"'\n df_fn_html = df_fn_html.replace(replace_str1, '')\n df_fn_html = df_fn_html.replace(replace_str2, replace_str1)\n pct_plans = n_plans / dfa_rows\n 
top2_fn = fn_cnt[0:2].sum()\n pct_top2_fn = top2_fn / n_plans\n text = (\n f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'\n )\n text += (\n f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.'\n )\n if len(uniq_probs) < 4:\n text += ' And this occurs only for Problems: '\n pro = ','.join('{}' for p in uniq_probs) + '.<br>'\n text += pro.format(*uniq_probs)\n else:\n text += ' And this occurs for all Problems.'\n text += '<br>'\n return df_fn_html, text, dfout\n\n\ndef make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95\n ), to_file='', show=False, excluded=None):\n \"\"\"\n To get 2 bar plots in a row.\n \"\"\"\n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1 > 0]\n a2 = df_list[1][x_col].unique().astype(int)\n a2 = a2[a2 > 0]\n assert len(a1) == len(a2) == 1\n action_nums = [a1[0], a2[0]]\n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n search = df_list[0].Searcher.tolist()\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i * m] for i in range(s_len)]\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4:\n y_col = 'Minutes'\n else:\n ty_col = y_col\n plt.title(f'{ty_col} vs. 
{x_col} for Problems {p1} & {p2}', y=1.05,\n fontsize=14)\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n axs[i].set_ylabel(ylab, fontsize=12)\n df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)\n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [(excluded[i][0] - 1) for i in range(excluded_len)]\n legend_patches = []\n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',\n fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',\n labelspacing=0.6, fancybox=True)\n plt.tight_layout()\n if to_file:\n plt.savefig(to_file)\n if show:\n return axs\n\n\n<mask token>\n\n\ndef order_analysis(df2, df1, column_to_compare):\n \"\"\"\n df2: has the large values.\n \"\"\"\n colA_larger_values = df2[column_to_compare]\n colA_smaller_values = df1[column_to_compare]\n mag = np.round(np.log(colA_larger_values / colA_smaller_values), 0)\n mag.sort_values(ascending=False, inplace=True)\n mag_aver = int(np.round(mag.mean(), 0))\n ma = mag[mag > mag_aver].index.tolist()\n above_multiples = mag_aver, df2.loc[ma, 'Searcher']\n return above_multiples\n\n\n<mask token>\n\n\ndef paragraph_p12(candidates_tup, return_html=False):\n \"\"\"\n For displaying the analysis of problems 1 & 2.\n \"\"\"\n elim_list = ''\n for i, c in candidates_tup:\n elim_list += f'<dt><b>{i:>2}: {c}</b></dt>'\n text = (\n '<h3>* Insights from 
Problems 1 and 2</h3><p style=\"font-size:110%;\">')\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n text += f'<dl>{elim_list}</dl></p></pre>'\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef add_div_around_html(div_html_text, output_string=False, div_style=\n '{width: 80%}'):\n \"\"\"\n Wrap an html code str inside a div.\n div_style: whatever follows style= within the <div>\n \n Behaviour with `output_string=True`:\n The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')\n The only thing to do is change the cell mode to Markdown.\n If `output_string=False`, the HTML/md output is displayed in an output cell.\n \"\"\"\n div = f'<div style=\"{div_style}\">{div_html_text}</div>'\n if output_string:\n return div\n else:\n return Markdown(div)\n",
"step-2": "<mask token>\n\n\ndef get_prob_specs():\n Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(), acp.air_cargo_p3(),\n acp.air_cargo_p4()]\n problems_specs = {'Problem': [name for name in problems],\n 'Air cargo problem': [(i + 1) for i in range(len(problems))],\n 'Cargos': [len(p.cargos) for p in Probs], 'Planes': [len(p.planes) for\n p in Probs], 'Airports': [len(p.airports) for p in Probs], 'Goal':\n [len(p.goal) for p in Probs]}\n return pd.DataFrame(problems_specs)\n\n\n<mask token>\n\n\ndef df2tsv(df, fname, replace=False):\n if Path(fname).exists():\n if replace:\n df.to_csv(fname, sep='\\t')\n return\n df.to_csv(fname, sep='\\t')\n return\n\n\ndef get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=\n False, replace=False):\n \"\"\"\n Combine all processed files of a problem found in Path(data_dir) with given stem.\n The file to be saved to/retrieved from out_dir is passed in file_as_tsv, tab separated csv.\n \n Input example:\n file_stem = 'prob_2'\n problem = 'Air Cargo Problem 2'\n Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.\n \"\"\"\n if file_stem is None or problem is None:\n print('file_stem and problem must have a value.')\n return\n t = '\\t'\n sfx = ['.csv', '_df.csv']\n fout = None\n if file_as_tsv:\n fout = Path(out_dir).joinpath(file_stem + sfx[1])\n if fout.exists() and not replace:\n df = pd.read_csv(fout, sep=t)\n try:\n return df.drop('Unnamed: 0', axis=1)\n except KeyError:\n pass\n pfiles = list(Path(raw_dir).glob(file_stem + '*'))\n if len(pfiles) == 0:\n print(f'No raw files with stem: {file_stem}')\n return\n dflist = []\n for f in pfiles:\n df, err = get_results_df(f, problem)\n if df is not None:\n df = df.merge(specs)\n df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x) + 1)\n df['index'] = df['index'].astype(int)\n df.set_index('index', drop=True, inplace=True)\n dflist.append(df)\n del df\n else:\n print(f'Error from 
get_results_df:\\n\\t{err}')\n dfout = pd.concat(dflist, ignore_index=False)\n dfout.sort_index(inplace=True)\n if file_as_tsv:\n df2tsv(dfout, fout, replace=replace)\n return dfout\n\n\ndef get_results_df(fname, problem):\n \"\"\"Process csv into dataframe.\n \"\"\"\n t = '\\t'\n val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',\n 'PlanLength', 'ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n df.columns = ['c', 'Searcher']\n df = df.reindex(columns=df.columns.tolist() + val_cols)\n sr = df.loc[df.c == 'Searcher', 'Searcher']\n for idx, sr_row in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 'Searcher'])\n df.dropna(inplace=True)\n df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n return df, ''\n\n\ndef concat_all_dfs(dflist):\n \"\"\"\n Output combined df for complete runs, Actions>0.\n \"\"\"\n dfall = pd.concat(dflist, ignore_index=False)\n dfall.reset_index(drop=False, inplace=True)\n dfall.rename(columns={'index': 'id'}, inplace=True)\n drop_cols = dfall.columns[-4:-1].tolist() + ['Problem', 'Minutes',\n 'GoalTests']\n dfa = dfall.drop(drop_cols, axis=1)\n del dfall\n dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]\n dfa = dfa[['Air cargo problem', 'id', 'search_fn', 'Searcher',\n 'Actions', 'PlanLength', 'NewNodes', 'Expansions', 'ElapsedSeconds']]\n return dfa[dfa['Actions'].values > 0]\n\n\ndef plans_length(dfa, which):\n \"\"\"\n dfa: frame of concatenated df1 to df4.\n Analysis of plan length for which in ['double', 'single']:\n PlanLength is double(single)-digit.\n \"\"\"\n if which == 'double':\n msk = dfa.PlanLength >= 10\n col2 = 'Frequency where PlanLength >=10'\n 
else:\n msk = dfa.PlanLength < 10\n col2 = 'Frequency where PlanLength <10'\n dfa_rows = dfa.shape[0]\n dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)\n uniq_probs = dfout['Air cargo problem'].unique()\n n_plans = dfout.shape[0]\n searcher_cnt = dfout['Searcher'].value_counts()\n fn_cnt = dfout['search_fn'].value_counts()\n df_fn = fn_cnt.to_frame()\n df_fn.reset_index(drop=False, inplace=True)\n df_fn.columns = ['Search function', col2]\n df_fn_html = df_fn.to_html(index=False, justify='center')\n replace_str1 = ' style=\"text-align: center;\"'\n replace_str2 = 'class=\"dataframe\"'\n df_fn_html = df_fn_html.replace(replace_str1, '')\n df_fn_html = df_fn_html.replace(replace_str2, replace_str1)\n pct_plans = n_plans / dfa_rows\n top2_fn = fn_cnt[0:2].sum()\n pct_top2_fn = top2_fn / n_plans\n text = (\n f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'\n )\n text += (\n f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.'\n )\n if len(uniq_probs) < 4:\n text += ' And this occurs only for Problems: '\n pro = ','.join('{}' for p in uniq_probs) + '.<br>'\n text += pro.format(*uniq_probs)\n else:\n text += ' And this occurs for all Problems.'\n text += '<br>'\n return df_fn_html, text, dfout\n\n\ndef make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95\n ), to_file='', show=False, excluded=None):\n \"\"\"\n To get 2 bar plots in a row.\n \"\"\"\n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1 > 0]\n a2 = df_list[1][x_col].unique().astype(int)\n a2 = a2[a2 > 0]\n assert len(a1) == len(a2) == 1\n action_nums = [a1[0], a2[0]]\n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n search = 
df_list[0].Searcher.tolist()\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i * m] for i in range(s_len)]\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4:\n y_col = 'Minutes'\n else:\n ty_col = y_col\n plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}', y=1.05,\n fontsize=14)\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n axs[i].set_ylabel(ylab, fontsize=12)\n df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)\n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [(excluded[i][0] - 1) for i in range(excluded_len)]\n legend_patches = []\n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',\n fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',\n labelspacing=0.6, fancybox=True)\n plt.tight_layout()\n if to_file:\n plt.savefig(to_file)\n if show:\n return axs\n\n\n<mask token>\n\n\ndef order_analysis(df2, df1, column_to_compare):\n \"\"\"\n df2: has the large values.\n \"\"\"\n colA_larger_values = df2[column_to_compare]\n colA_smaller_values = df1[column_to_compare]\n mag = np.round(np.log(colA_larger_values / colA_smaller_values), 0)\n mag.sort_values(ascending=False, inplace=True)\n mag_aver = int(np.round(mag.mean(), 0))\n ma = mag[mag > 
mag_aver].index.tolist()\n above_multiples = mag_aver, df2.loc[ma, 'Searcher']\n return above_multiples\n\n\ndef comparison_paragraph(df2, df1, heading, column_to_compare, return_html=\n False):\n p1 = df1.loc[0, 'Problem'][-1]\n p2 = df2.loc[0, 'Problem'][-1]\n order_aver, searches_above = order_analysis(df2, df1, column_to_compare)\n above = format_multiples(searches_above)\n headinglc = heading.lower()\n text = (\n f'<h3>* {heading}</h3><p style=\"font-size:110%;\">For Problems {p1} and {p2}, '\n )\n text += (\n f'the <i>average</i> order of magnitude difference in {headinglc} is ')\n text += (\n f'<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>'\n )\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef get_elim_candidates(df2, df1):\n \"\"\"\n For the analysis of problems 1 & 2. \n List the costliest searches: candidates for elimination on more complex problems.\n \"\"\"\n if df1.loc[1, 'Problem'] != problems[0]:\n return\n nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')\n time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')\n elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(\n time_above[:time_order_av]))\n out = [(SEARCHES.index(c) + 1, c) for c in elim_candidates]\n return out\n\n\ndef paragraph_p12(candidates_tup, return_html=False):\n \"\"\"\n For displaying the analysis of problems 1 & 2.\n \"\"\"\n elim_list = ''\n for i, c in candidates_tup:\n elim_list += f'<dt><b>{i:>2}: {c}</b></dt>'\n text = (\n '<h3>* Insights from Problems 1 and 2</h3><p style=\"font-size:110%;\">')\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n 
text += f'<dl>{elim_list}</dl></p></pre>'\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef add_div_around_html(div_html_text, output_string=False, div_style=\n '{width: 80%}'):\n \"\"\"\n Wrap an html code str inside a div.\n div_style: whatever follows style= within the <div>\n \n Behaviour with `output_string=True`:\n The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')\n The only thing to do is change the cell mode to Markdown.\n If `output_string=False`, the HTML/md output is displayed in an output cell.\n \"\"\"\n div = f'<div style=\"{div_style}\">{div_html_text}</div>'\n if output_string:\n return div\n else:\n return Markdown(div)\n",
"step-3": "<mask token>\nplt.style.use('seaborn-muted')\n<mask token>\n\n\ndef get_prob_specs():\n Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(), acp.air_cargo_p3(),\n acp.air_cargo_p4()]\n problems_specs = {'Problem': [name for name in problems],\n 'Air cargo problem': [(i + 1) for i in range(len(problems))],\n 'Cargos': [len(p.cargos) for p in Probs], 'Planes': [len(p.planes) for\n p in Probs], 'Airports': [len(p.airports) for p in Probs], 'Goal':\n [len(p.goal) for p in Probs]}\n return pd.DataFrame(problems_specs)\n\n\n<mask token>\n\n\ndef df2tsv(df, fname, replace=False):\n if Path(fname).exists():\n if replace:\n df.to_csv(fname, sep='\\t')\n return\n df.to_csv(fname, sep='\\t')\n return\n\n\ndef get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=\n False, replace=False):\n \"\"\"\n Combine all processed files of a problem found in Path(data_dir) with given stem.\n The file to be saved to/retrieved from out_dir is passed in file_as_tsv, tab separated csv.\n \n Input example:\n file_stem = 'prob_2'\n problem = 'Air Cargo Problem 2'\n Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.\n \"\"\"\n if file_stem is None or problem is None:\n print('file_stem and problem must have a value.')\n return\n t = '\\t'\n sfx = ['.csv', '_df.csv']\n fout = None\n if file_as_tsv:\n fout = Path(out_dir).joinpath(file_stem + sfx[1])\n if fout.exists() and not replace:\n df = pd.read_csv(fout, sep=t)\n try:\n return df.drop('Unnamed: 0', axis=1)\n except KeyError:\n pass\n pfiles = list(Path(raw_dir).glob(file_stem + '*'))\n if len(pfiles) == 0:\n print(f'No raw files with stem: {file_stem}')\n return\n dflist = []\n for f in pfiles:\n df, err = get_results_df(f, problem)\n if df is not None:\n df = df.merge(specs)\n df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x) + 1)\n df['index'] = df['index'].astype(int)\n df.set_index('index', drop=True, inplace=True)\n dflist.append(df)\n del df\n 
else:\n print(f'Error from get_results_df:\\n\\t{err}')\n dfout = pd.concat(dflist, ignore_index=False)\n dfout.sort_index(inplace=True)\n if file_as_tsv:\n df2tsv(dfout, fout, replace=replace)\n return dfout\n\n\ndef get_results_df(fname, problem):\n \"\"\"Process csv into dataframe.\n \"\"\"\n t = '\\t'\n val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',\n 'PlanLength', 'ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n df.columns = ['c', 'Searcher']\n df = df.reindex(columns=df.columns.tolist() + val_cols)\n sr = df.loc[df.c == 'Searcher', 'Searcher']\n for idx, sr_row in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 'Searcher'])\n df.dropna(inplace=True)\n df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n return df, ''\n\n\ndef concat_all_dfs(dflist):\n \"\"\"\n Output combined df for complete runs, Actions>0.\n \"\"\"\n dfall = pd.concat(dflist, ignore_index=False)\n dfall.reset_index(drop=False, inplace=True)\n dfall.rename(columns={'index': 'id'}, inplace=True)\n drop_cols = dfall.columns[-4:-1].tolist() + ['Problem', 'Minutes',\n 'GoalTests']\n dfa = dfall.drop(drop_cols, axis=1)\n del dfall\n dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]\n dfa = dfa[['Air cargo problem', 'id', 'search_fn', 'Searcher',\n 'Actions', 'PlanLength', 'NewNodes', 'Expansions', 'ElapsedSeconds']]\n return dfa[dfa['Actions'].values > 0]\n\n\ndef plans_length(dfa, which):\n \"\"\"\n dfa: frame of concatenated df1 to df4.\n Analysis of plan length for which in ['double', 'single']:\n PlanLength is double(single)-digit.\n \"\"\"\n if which == 'double':\n msk = dfa.PlanLength >= 10\n col2 = 'Frequency 
where PlanLength >=10'\n else:\n msk = dfa.PlanLength < 10\n col2 = 'Frequency where PlanLength <10'\n dfa_rows = dfa.shape[0]\n dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)\n uniq_probs = dfout['Air cargo problem'].unique()\n n_plans = dfout.shape[0]\n searcher_cnt = dfout['Searcher'].value_counts()\n fn_cnt = dfout['search_fn'].value_counts()\n df_fn = fn_cnt.to_frame()\n df_fn.reset_index(drop=False, inplace=True)\n df_fn.columns = ['Search function', col2]\n df_fn_html = df_fn.to_html(index=False, justify='center')\n replace_str1 = ' style=\"text-align: center;\"'\n replace_str2 = 'class=\"dataframe\"'\n df_fn_html = df_fn_html.replace(replace_str1, '')\n df_fn_html = df_fn_html.replace(replace_str2, replace_str1)\n pct_plans = n_plans / dfa_rows\n top2_fn = fn_cnt[0:2].sum()\n pct_top2_fn = top2_fn / n_plans\n text = (\n f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'\n )\n text += (\n f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.'\n )\n if len(uniq_probs) < 4:\n text += ' And this occurs only for Problems: '\n pro = ','.join('{}' for p in uniq_probs) + '.<br>'\n text += pro.format(*uniq_probs)\n else:\n text += ' And this occurs for all Problems.'\n text += '<br>'\n return df_fn_html, text, dfout\n\n\ndef make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95\n ), to_file='', show=False, excluded=None):\n \"\"\"\n To get 2 bar plots in a row.\n \"\"\"\n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1 > 0]\n a2 = df_list[1][x_col].unique().astype(int)\n a2 = a2[a2 > 0]\n assert len(a1) == len(a2) == 1\n action_nums = [a1[0], a2[0]]\n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n search = 
df_list[0].Searcher.tolist()\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i * m] for i in range(s_len)]\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4:\n y_col = 'Minutes'\n else:\n ty_col = y_col\n plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}', y=1.05,\n fontsize=14)\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n axs[i].set_ylabel(ylab, fontsize=12)\n df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)\n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [(excluded[i][0] - 1) for i in range(excluded_len)]\n legend_patches = []\n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',\n fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',\n labelspacing=0.6, fancybox=True)\n plt.tight_layout()\n if to_file:\n plt.savefig(to_file)\n if show:\n return axs\n\n\ndef format_multiples(multi):\n s = ''\n for i in range(len(multi)):\n s += '{' + str(i) + ':s}, '\n s = s[:-2]\n return '[' + s.format(*multi.values) + ']'\n\n\ndef order_analysis(df2, df1, column_to_compare):\n \"\"\"\n df2: has the large values.\n \"\"\"\n colA_larger_values = df2[column_to_compare]\n colA_smaller_values = df1[column_to_compare]\n mag = 
np.round(np.log(colA_larger_values / colA_smaller_values), 0)\n mag.sort_values(ascending=False, inplace=True)\n mag_aver = int(np.round(mag.mean(), 0))\n ma = mag[mag > mag_aver].index.tolist()\n above_multiples = mag_aver, df2.loc[ma, 'Searcher']\n return above_multiples\n\n\ndef comparison_paragraph(df2, df1, heading, column_to_compare, return_html=\n False):\n p1 = df1.loc[0, 'Problem'][-1]\n p2 = df2.loc[0, 'Problem'][-1]\n order_aver, searches_above = order_analysis(df2, df1, column_to_compare)\n above = format_multiples(searches_above)\n headinglc = heading.lower()\n text = (\n f'<h3>* {heading}</h3><p style=\"font-size:110%;\">For Problems {p1} and {p2}, '\n )\n text += (\n f'the <i>average</i> order of magnitude difference in {headinglc} is ')\n text += (\n f'<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>'\n )\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef get_elim_candidates(df2, df1):\n \"\"\"\n For the analysis of problems 1 & 2. 
\n List the costliest searches: candidates for elimination on more complex problems.\n \"\"\"\n if df1.loc[1, 'Problem'] != problems[0]:\n return\n nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')\n time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')\n elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(\n time_above[:time_order_av]))\n out = [(SEARCHES.index(c) + 1, c) for c in elim_candidates]\n return out\n\n\ndef paragraph_p12(candidates_tup, return_html=False):\n \"\"\"\n For displaying the analysis of problems 1 & 2.\n \"\"\"\n elim_list = ''\n for i, c in candidates_tup:\n elim_list += f'<dt><b>{i:>2}: {c}</b></dt>'\n text = (\n '<h3>* Insights from Problems 1 and 2</h3><p style=\"font-size:110%;\">')\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n text += f'<dl>{elim_list}</dl></p></pre>'\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef add_div_around_html(div_html_text, output_string=False, div_style=\n '{width: 80%}'):\n \"\"\"\n Wrap an html code str inside a div.\n div_style: whatever follows style= within the <div>\n \n Behaviour with `output_string=True`:\n The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')\n The only thing to do is change the cell mode to Markdown.\n If `output_string=False`, the HTML/md output is displayed in an output cell.\n \"\"\"\n div = f'<div style=\"{div_style}\">{div_html_text}</div>'\n if output_string:\n return div\n else:\n return Markdown(div)\n",
"step-4": "import numpy as np\nimport pandas as pd\nfrom pathlib import Path\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nplt.style.use('seaborn-muted')\nfrom IPython.display import HTML, Markdown\nimport air_cargo_problems as acp\nproblems = ['Air Cargo Problem 1', 'Air Cargo Problem 2',\n 'Air Cargo Problem 3', 'Air Cargo Problem 4']\nSEARCHES = ['breadth_first_search', 'depth_first_graph_search',\n 'uniform_cost_search', 'greedy_best_first_graph_search h_unmet_goals',\n 'greedy_best_first_graph_search h_pg_levelsum',\n 'greedy_best_first_graph_search h_pg_maxlevel',\n 'greedy_best_first_graph_search h_pg_setlevel',\n 'astar_search h_unmet_goals', 'astar_search h_pg_levelsum',\n 'astar_search h_pg_maxlevel', 'astar_search h_pg_setlevel']\n\n\ndef get_prob_specs():\n Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(), acp.air_cargo_p3(),\n acp.air_cargo_p4()]\n problems_specs = {'Problem': [name for name in problems],\n 'Air cargo problem': [(i + 1) for i in range(len(problems))],\n 'Cargos': [len(p.cargos) for p in Probs], 'Planes': [len(p.planes) for\n p in Probs], 'Airports': [len(p.airports) for p in Probs], 'Goal':\n [len(p.goal) for p in Probs]}\n return pd.DataFrame(problems_specs)\n\n\nspecs = get_prob_specs()\n\n\ndef df2tsv(df, fname, replace=False):\n if Path(fname).exists():\n if replace:\n df.to_csv(fname, sep='\\t')\n return\n df.to_csv(fname, sep='\\t')\n return\n\n\ndef get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=\n False, replace=False):\n \"\"\"\n Combine all processed files of a problem found in Path(data_dir) with given stem.\n The file to be saved to/retrieved from out_dir is passed in file_as_tsv, tab separated csv.\n \n Input example:\n file_stem = 'prob_2'\n problem = 'Air Cargo Problem 2'\n Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.\n \"\"\"\n if file_stem is None or problem is None:\n print('file_stem and problem must have a value.')\n 
return\n t = '\\t'\n sfx = ['.csv', '_df.csv']\n fout = None\n if file_as_tsv:\n fout = Path(out_dir).joinpath(file_stem + sfx[1])\n if fout.exists() and not replace:\n df = pd.read_csv(fout, sep=t)\n try:\n return df.drop('Unnamed: 0', axis=1)\n except KeyError:\n pass\n pfiles = list(Path(raw_dir).glob(file_stem + '*'))\n if len(pfiles) == 0:\n print(f'No raw files with stem: {file_stem}')\n return\n dflist = []\n for f in pfiles:\n df, err = get_results_df(f, problem)\n if df is not None:\n df = df.merge(specs)\n df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x) + 1)\n df['index'] = df['index'].astype(int)\n df.set_index('index', drop=True, inplace=True)\n dflist.append(df)\n del df\n else:\n print(f'Error from get_results_df:\\n\\t{err}')\n dfout = pd.concat(dflist, ignore_index=False)\n dfout.sort_index(inplace=True)\n if file_as_tsv:\n df2tsv(dfout, fout, replace=replace)\n return dfout\n\n\ndef get_results_df(fname, problem):\n \"\"\"Process csv into dataframe.\n \"\"\"\n t = '\\t'\n val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',\n 'PlanLength', 'ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n df.columns = ['c', 'Searcher']\n df = df.reindex(columns=df.columns.tolist() + val_cols)\n sr = df.loc[df.c == 'Searcher', 'Searcher']\n for idx, sr_row in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 'Searcher'])\n df.dropna(inplace=True)\n df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n return df, ''\n\n\ndef concat_all_dfs(dflist):\n \"\"\"\n Output combined df for complete runs, Actions>0.\n \"\"\"\n dfall = pd.concat(dflist, ignore_index=False)\n 
dfall.reset_index(drop=False, inplace=True)\n dfall.rename(columns={'index': 'id'}, inplace=True)\n drop_cols = dfall.columns[-4:-1].tolist() + ['Problem', 'Minutes',\n 'GoalTests']\n dfa = dfall.drop(drop_cols, axis=1)\n del dfall\n dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]\n dfa = dfa[['Air cargo problem', 'id', 'search_fn', 'Searcher',\n 'Actions', 'PlanLength', 'NewNodes', 'Expansions', 'ElapsedSeconds']]\n return dfa[dfa['Actions'].values > 0]\n\n\ndef plans_length(dfa, which):\n \"\"\"\n dfa: frame of concatenated df1 to df4.\n Analysis of plan length for which in ['double', 'single']:\n PlanLength is double(single)-digit.\n \"\"\"\n if which == 'double':\n msk = dfa.PlanLength >= 10\n col2 = 'Frequency where PlanLength >=10'\n else:\n msk = dfa.PlanLength < 10\n col2 = 'Frequency where PlanLength <10'\n dfa_rows = dfa.shape[0]\n dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)\n uniq_probs = dfout['Air cargo problem'].unique()\n n_plans = dfout.shape[0]\n searcher_cnt = dfout['Searcher'].value_counts()\n fn_cnt = dfout['search_fn'].value_counts()\n df_fn = fn_cnt.to_frame()\n df_fn.reset_index(drop=False, inplace=True)\n df_fn.columns = ['Search function', col2]\n df_fn_html = df_fn.to_html(index=False, justify='center')\n replace_str1 = ' style=\"text-align: center;\"'\n replace_str2 = 'class=\"dataframe\"'\n df_fn_html = df_fn_html.replace(replace_str1, '')\n df_fn_html = df_fn_html.replace(replace_str2, replace_str1)\n pct_plans = n_plans / dfa_rows\n top2_fn = fn_cnt[0:2].sum()\n pct_top2_fn = top2_fn / n_plans\n text = (\n f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'\n )\n text += (\n f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.'\n )\n if len(uniq_probs) < 4:\n text += ' And this occurs only for Problems: '\n pro = ','.join('{}' for p in uniq_probs) + '.<br>'\n text += 
pro.format(*uniq_probs)\n else:\n text += ' And this occurs for all Problems.'\n text += '<br>'\n return df_fn_html, text, dfout\n\n\ndef make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95\n ), to_file='', show=False, excluded=None):\n \"\"\"\n To get 2 bar plots in a row.\n \"\"\"\n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1 > 0]\n a2 = df_list[1][x_col].unique().astype(int)\n a2 = a2[a2 > 0]\n assert len(a1) == len(a2) == 1\n action_nums = [a1[0], a2[0]]\n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n search = df_list[0].Searcher.tolist()\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i * m] for i in range(s_len)]\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4:\n y_col = 'Minutes'\n else:\n ty_col = y_col\n plt.title(f'{ty_col} vs. 
{x_col} for Problems {p1} & {p2}', y=1.05,\n fontsize=14)\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n axs[i].set_ylabel(ylab, fontsize=12)\n df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)\n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [(excluded[i][0] - 1) for i in range(excluded_len)]\n legend_patches = []\n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',\n fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',\n labelspacing=0.6, fancybox=True)\n plt.tight_layout()\n if to_file:\n plt.savefig(to_file)\n if show:\n return axs\n\n\ndef format_multiples(multi):\n s = ''\n for i in range(len(multi)):\n s += '{' + str(i) + ':s}, '\n s = s[:-2]\n return '[' + s.format(*multi.values) + ']'\n\n\ndef order_analysis(df2, df1, column_to_compare):\n \"\"\"\n df2: has the large values.\n \"\"\"\n colA_larger_values = df2[column_to_compare]\n colA_smaller_values = df1[column_to_compare]\n mag = np.round(np.log(colA_larger_values / colA_smaller_values), 0)\n mag.sort_values(ascending=False, inplace=True)\n mag_aver = int(np.round(mag.mean(), 0))\n ma = mag[mag > mag_aver].index.tolist()\n above_multiples = mag_aver, df2.loc[ma, 'Searcher']\n return above_multiples\n\n\ndef comparison_paragraph(df2, df1, heading, column_to_compare, return_html=\n False):\n p1 = df1.loc[0, 'Problem'][-1]\n p2 = 
df2.loc[0, 'Problem'][-1]\n order_aver, searches_above = order_analysis(df2, df1, column_to_compare)\n above = format_multiples(searches_above)\n headinglc = heading.lower()\n text = (\n f'<h3>* {heading}</h3><p style=\"font-size:110%;\">For Problems {p1} and {p2}, '\n )\n text += (\n f'the <i>average</i> order of magnitude difference in {headinglc} is ')\n text += (\n f'<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>'\n )\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef get_elim_candidates(df2, df1):\n \"\"\"\n For the analysis of problems 1 & 2. \n List the costliest searches: candidates for elimination on more complex problems.\n \"\"\"\n if df1.loc[1, 'Problem'] != problems[0]:\n return\n nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')\n time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')\n elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(\n time_above[:time_order_av]))\n out = [(SEARCHES.index(c) + 1, c) for c in elim_candidates]\n return out\n\n\ndef paragraph_p12(candidates_tup, return_html=False):\n \"\"\"\n For displaying the analysis of problems 1 & 2.\n \"\"\"\n elim_list = ''\n for i, c in candidates_tup:\n elim_list += f'<dt><b>{i:>2}: {c}</b></dt>'\n text = (\n '<h3>* Insights from Problems 1 and 2</h3><p style=\"font-size:110%;\">')\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n text += f'<dl>{elim_list}</dl></p></pre>'\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef add_div_around_html(div_html_text, output_string=False, div_style=\n '{width: 80%}'):\n \"\"\"\n Wrap an html code str 
inside a div.\n div_style: whatever follows style= within the <div>\n \n Behaviour with `output_string=True`:\n The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')\n The only thing to do is change the cell mode to Markdown.\n If `output_string=False`, the HTML/md output is displayed in an output cell.\n \"\"\"\n div = f'<div style=\"{div_style}\">{div_html_text}</div>'\n if output_string:\n return div\n else:\n return Markdown(div)\n",
"step-5": "import numpy as np\nimport pandas as pd\nfrom pathlib import Path\n\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nplt.style.use('seaborn-muted')\n\n#from IPython import get_ipython\nfrom IPython.display import HTML, Markdown\n\nimport air_cargo_problems as acp\n\n\nproblems = ['Air Cargo Problem 1', \n 'Air Cargo Problem 2',\n 'Air Cargo Problem 3',\n 'Air Cargo Problem 4']\n\nSEARCHES = ['breadth_first_search',\n 'depth_first_graph_search',\n 'uniform_cost_search',\n 'greedy_best_first_graph_search h_unmet_goals',\n 'greedy_best_first_graph_search h_pg_levelsum',\n 'greedy_best_first_graph_search h_pg_maxlevel',\n 'greedy_best_first_graph_search h_pg_setlevel',\n 'astar_search h_unmet_goals',\n 'astar_search h_pg_levelsum',\n 'astar_search h_pg_maxlevel',\n 'astar_search h_pg_setlevel']\n\n\ndef get_prob_specs():\n Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(),\n acp.air_cargo_p3(), acp.air_cargo_p4()]\n\n problems_specs = {'Problem': [name for name in problems],\n 'Air cargo problem': [i+1 for i in range(len(problems))],\n 'Cargos': [len(p.cargos) for p in Probs],\n 'Planes': [len(p.planes) for p in Probs],\n 'Airports': [len(p.airports) for p in Probs],\n 'Goal': [len(p.goal) for p in Probs]}\n return pd.DataFrame(problems_specs)\n\nspecs = get_prob_specs()\n\n\ndef df2tsv(df, fname, replace=False):\n if Path(fname).exists():\n if replace:\n df.to_csv(fname, sep='\\t')\n #else:\n # print(f'File {fname} not replaced.')\n return\n \n df.to_csv(fname, sep='\\t')\n return\n\n\ndef get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=False, replace=False):\n \"\"\"\n Combine all processed files of a problem found in Path(data_dir) with given stem.\n The file to be saved to/retrieved from out_dir is passed in file_as_tsv, tab separated csv.\n \n Input example:\n file_stem = 'prob_2'\n problem = 'Air Cargo Problem 2'\n Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.\n 
\"\"\"\n if file_stem is None or problem is None:\n print('file_stem and problem must have a value.')\n return\n \n t = '\\t'\n \n # input/output file suffixes:\n sfx = ['.csv', '_df.csv']\n \n # Try retrieving it from out_dir if not replacing it:\n fout = None\n if file_as_tsv:\n fout = Path(out_dir).joinpath(file_stem + sfx[1])\n if fout.exists() and not replace:\n df = pd.read_csv(fout, sep=t)\n try:\n return df.drop('Unnamed: 0', axis=1)\n except KeyError:\n pass\n # else: (re)process\n \n pfiles = list(Path(raw_dir).glob(file_stem + '*'))\n if len(pfiles) == 0:\n print(f'No raw files with stem: {file_stem}')\n return\n \n dflist = []\n for f in pfiles:\n df, err = get_results_df(f, problem)\n \n if df is not None:\n df = df.merge(specs)\n df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x)+1)\n df['index'] = df['index'].astype(int)\n df.set_index('index', drop=True, inplace=True)\n \n dflist.append(df)\n del df\n else:\n print(f'Error from get_results_df:\\n\\t{err}')\n \n dfout = pd.concat(dflist, ignore_index=False)\n dfout.sort_index(inplace=True)\n \n if file_as_tsv:\n df2tsv(dfout, fout, replace=replace)\n \n return dfout\n\n\ndef get_results_df(fname, problem):\n \"\"\"Process csv into dataframe.\n \"\"\"\n t = '\\t'\n \n # Cols to add:\n val_cols = ['Actions','Expansions','GoalTests','NewNodes','PlanLength','ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n \n # Rename cols: c (temp) -> Searcher\n df.columns = ['c', 'Searcher']\n # Add new cols & reindex\n df = df.reindex(columns = df.columns.tolist() + val_cols)\n \n # Populate new cols according to row with search name:\n sr = df.loc[df.c == 'Searcher', 'Searcher'] \n for (idx, sr_row) in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 
'Searcher'])\n\n df.dropna(inplace=True)\n # Add a minute column:\n df['Minutes'] = np.round(df.ElapsedSeconds/60, 3)\n \n # Replace values of 1st col with problem name & update col name:\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n \n return df, ''\n\n\ndef concat_all_dfs(dflist):\n \"\"\"\n Output combined df for complete runs, Actions>0.\n \"\"\"\n dfall = pd.concat(dflist, ignore_index=False)\n dfall.reset_index(drop=False, inplace=True)\n dfall.rename(columns={'index': 'id'}, inplace=True)\n # reduced\n drop_cols = dfall.columns[-4:-1].tolist() + ['Problem','Minutes','GoalTests']\n dfa = dfall.drop(drop_cols, axis=1)\n del dfall\n # add col for function name\n dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]\n # reorder cols\n dfa = dfa[['Air cargo problem','id','search_fn','Searcher','Actions',\n 'PlanLength', 'NewNodes','Expansions','ElapsedSeconds']]\n\n # complete runs only:\n return dfa[dfa['Actions'].values > 0]\n\n\ndef plans_length(dfa, which):\n \"\"\"\n dfa: frame of concatenated df1 to df4.\n Analysis of plan length for which in ['double', 'single']:\n PlanLength is double(single)-digit.\n \"\"\"\n if which == 'double':\n msk = dfa.PlanLength >= 10\n col2 = 'Frequency where PlanLength >=10'\n else:\n msk = dfa.PlanLength < 10\n col2 = 'Frequency where PlanLength <10'\n \n dfa_rows = dfa.shape[0]\n \n dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)\n\n uniq_probs = dfout['Air cargo problem'].unique()\n n_plans = dfout.shape[0]\n searcher_cnt = dfout['Searcher'].value_counts()\n fn_cnt = dfout['search_fn'].value_counts()\n\n # get the html string:\n df_fn = fn_cnt.to_frame()\n df_fn.reset_index(drop=False, inplace=True)\n df_fn.columns = ['Search function', col2]\n \n df_fn_html = df_fn.to_html(index=False, justify='center')\n replace_str1 = ' style=\"text-align: center;\"'\n replace_str2 = 'class=\"dataframe\"'\n df_fn_html = df_fn_html.replace(replace_str1, 
'')\n df_fn_html = df_fn_html.replace(replace_str2, replace_str1)\n\n pct_plans = n_plans/dfa_rows\n top2_fn = fn_cnt[0:2].sum()\n pct_top2_fn = top2_fn/n_plans\n\n text = f\"Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>\"\n text += f\"In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.\"\n if len(uniq_probs) < 4:\n text += \" And this occurs only for Problems: \"\n pro = \",\".join('{}' for p in uniq_probs) +'.<br>'\n text += pro.format(*uniq_probs)\n else:\n text += \" And this occurs for all Problems.\"\n text += \"<br>\"\n \n return df_fn_html, text, dfout\n\ndef make_bar_plots(df_list,\n x_col, y_col,\n problems,\n legend_bbox=(.05, .95),\n to_file='',\n show=False,\n excluded=None):\n \"\"\"\n To get 2 bar plots in a row.\n \"\"\" \n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1>0]\n a2 = df_list[1][x_col].unique().astype(int)\n a2 = a2[a2>0]\n assert len(a1) == len(a2) == 1\n \n action_nums = [a1[0], a2[0]]\n \n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n \n # Seach functions names should be common to all dfs:\n search = df_list[0].Searcher.tolist()\n \n # Sample cmap according to categories:\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i*m] for i in range(s_len)]\n \n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12,5))\n \n # Use the minutes columns for the more complex problems:\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4: # applies to problems 3/4\n y_col = 'Minutes'\n else:\n ty_col = y_col\n \n plt.title(f'{ty_col} vs. 
{x_col} for Problems {p1} & {p2}',\n y = 1.05, fontsize=14)\n\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n # log scale on NewNodes for df2, df3, df4:\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n \n axs[i].set_ylabel(ylab, fontsize=12)\n\n df[y_col].plot.bar(ax=axs[i], logy=ylog,\n color=colors,\n legend=False)\n \n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n # Modify the legend to indicate excluded searches\n # (bc colormap is identical to fig1/2, but some runs have no data).\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [excluded[i][0]-1 for i in range(excluded_len)]\n \n legend_patches = [] \n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n \n axs[1].legend(handles=legend_patches,\n title=legt,\n title_fontsize='14',\n fontsize='medium', \n bbox_to_anchor=legend_bbox, \n loc='upper left',\n labelspacing=0.6,\n fancybox=True)\n\n plt.tight_layout()\n \n if to_file:\n plt.savefig(to_file)\n \n if show:\n return axs\n\n\ndef format_multiples(multi):\n s = ''\n for i in range(len(multi)):\n s += '{'+ str(i) +':s}, '\n s = s[:-2]\n return '[' + s.format(*multi.values) + ']'\n\n\ndef order_analysis(df2, df1, column_to_compare):\n \"\"\"\n df2: has the large values.\n \"\"\"\n colA_larger_values = df2[column_to_compare]\n colA_smaller_values = df1[column_to_compare]\n\n # orders of magnitude difference btw dfB and dfA (min, max):\n mag = np.round(np.log(colA_larger_values/colA_smaller_values), 0)\n mag.sort_values(ascending=False, inplace=True)\n mag_aver = 
int(np.round(mag.mean(), 0))\n\n # get the indices of values above average:\n ma = mag[mag > mag_aver].index.tolist()\n \n # get the names of all searchers corresponding to the ma:\n above_multiples = (mag_aver, df2.loc[ma, 'Searcher'])\n return above_multiples\n\n\ndef comparison_paragraph(df2, df1, heading, column_to_compare, return_html=False):\n\n p1 = df1.loc[0,'Problem'][-1]\n p2 = df2.loc[0,'Problem'][-1]\n \n order_aver, searches_above = order_analysis(df2, df1, column_to_compare)\n above = format_multiples(searches_above)\n \n headinglc = heading.lower()\n text = f\"\"\"<h3>* {heading}</h3><p style=\"font-size:110%;\">For Problems {p1} and {p2}, \"\"\"\n text += f\"the <i>average</i> order of magnitude difference in {headinglc} is \"\n text += f\"<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>\"\n\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef get_elim_candidates(df2, df1):\n \"\"\"\n For the analysis of problems 1 & 2. \n List the costliest searches: candidates for elimination on more complex problems.\n \"\"\"\n if df1.loc[1,'Problem']!= problems[0]:\n return\n \n nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')\n time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')\n elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(time_above[:time_order_av]))\n # return their 1-base index also:\n out = [(SEARCHES.index(c)+1, c) for c in elim_candidates]\n return out\n\n \ndef paragraph_p12(candidates_tup, return_html=False):\n \"\"\"\n For displaying the analysis of problems 1 & 2.\n \"\"\"\n\n elim_list = \"\"\n for i, c in candidates_tup:\n elim_list += f\"<dt><b>{i:>2}: {c}</b></dt>\"\n \n text = \"\"\"<h3>* Insights from Problems 1 and 2</h3><p style=\"font-size:110%;\">\"\"\"\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are 
candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n text += f\"<dl>{elim_list}</dl></p></pre>\"\n \n if return_html:\n return text\n else:\n return Markdown(text) \n\n \ndef add_div_around_html(div_html_text, output_string=False, div_style=\"{width: 80%}\"):\n \"\"\"\n Wrap an html code str inside a div.\n div_style: whatever follows style= within the <div>\n \n Behaviour with `output_string=True`:\n The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')\n The only thing to do is change the cell mode to Markdown.\n If `output_string=False`, the HTML/md output is displayed in an output cell.\n \"\"\"\n div = f\"\"\"<div style=\"{div_style}\">{div_html_text}</div>\"\"\"\n if output_string:\n return div\n #get_ipython().set_next_input(div, 'markdown')\n else:\n return Markdown(div)",
"step-ids": [
6,
12,
14,
16,
17
]
}
|
[
6,
12,
14,
16,
17
] |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-Today Serpent Consulting Services Pvt.Ltd. (<http://www.serpentcs.com>).
# Copyright (C) 2004 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from odoo import api, models
import time
class location_accommodation(models.AbstractModel):
    """QWeb report model for the tenant-location accommodation report.

    Registered under the report name
    ``sg_accommodation.view_location_report``; ``get_companies`` is exposed
    to the template through the rendering context.
    """
    _name = 'report.sg_accommodation.view_location_report'

    @api.model
    def get_companies(self):
        """Return the sorted company codes of all tenant companies.

        Side effect: stores ``self.td_list``, a list of 0-based column
        indices (one per company code) consumed by the report template.
        """
        tenants = self.env['res.company'].search([('tenant', '=', True)])
        codes = [tenant.company_code for tenant in tenants]
        if codes:
            codes.sort()
        # One table-cell index per company code, for the template layout.
        self.td_list = list(range(len(codes)))
        return codes

    @api.multi
    def render_html(self, docids, data=None):
        """Render the location report for the accommodation records in ``self.ids``."""
        report_name = 'sg_accommodation.view_location_report'
        report = self.env['report']._get_report_from_name(report_name)
        docs = self.env['accommodation.accommodation'].browse(self.ids)
        values = {
            'doc_ids': self.ids,
            'doc_model': report.model,
            'data': data,
            'docs': docs,
            'time': time,
            'get_companies': self.get_companies,
        }
        return self.env['report'].render(report_name, values)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
normal
|
{
"blob_id": "ac99c19294661657d383b036c9ab83e7b610cb7d",
"index": 6896,
"step-1": "<mask token>\n\n\nclass location_accommodation(models.AbstractModel):\n <mask token>\n <mask token>\n\n @api.multi\n def render_html(self, docids, data=None):\n report = self.env['report']._get_report_from_name(\n 'sg_accommodation.view_location_report')\n records = self.env['accommodation.accommodation'].browse(self.ids)\n docargs = {'doc_ids': self.ids, 'doc_model': report.model, 'data':\n data, 'docs': records, 'time': time, 'get_companies': self.\n get_companies}\n return self.env['report'].render(\n 'sg_accommodation.view_location_report', docargs)\n",
"step-2": "<mask token>\n\n\nclass location_accommodation(models.AbstractModel):\n <mask token>\n\n @api.model\n def get_companies(self):\n company_list = []\n self.td_list = []\n comp_ids = self.env['res.company'].search([('tenant', '=', True)])\n for comp in comp_ids:\n company_list.append(comp.company_code)\n if company_list:\n company_list.sort()\n no_of_td = company_list\n for td in range(0, len(no_of_td)):\n self.td_list.append(td)\n return company_list\n\n @api.multi\n def render_html(self, docids, data=None):\n report = self.env['report']._get_report_from_name(\n 'sg_accommodation.view_location_report')\n records = self.env['accommodation.accommodation'].browse(self.ids)\n docargs = {'doc_ids': self.ids, 'doc_model': report.model, 'data':\n data, 'docs': records, 'time': time, 'get_companies': self.\n get_companies}\n return self.env['report'].render(\n 'sg_accommodation.view_location_report', docargs)\n",
"step-3": "<mask token>\n\n\nclass location_accommodation(models.AbstractModel):\n _name = 'report.sg_accommodation.view_location_report'\n\n @api.model\n def get_companies(self):\n company_list = []\n self.td_list = []\n comp_ids = self.env['res.company'].search([('tenant', '=', True)])\n for comp in comp_ids:\n company_list.append(comp.company_code)\n if company_list:\n company_list.sort()\n no_of_td = company_list\n for td in range(0, len(no_of_td)):\n self.td_list.append(td)\n return company_list\n\n @api.multi\n def render_html(self, docids, data=None):\n report = self.env['report']._get_report_from_name(\n 'sg_accommodation.view_location_report')\n records = self.env['accommodation.accommodation'].browse(self.ids)\n docargs = {'doc_ids': self.ids, 'doc_model': report.model, 'data':\n data, 'docs': records, 'time': time, 'get_companies': self.\n get_companies}\n return self.env['report'].render(\n 'sg_accommodation.view_location_report', docargs)\n",
"step-4": "from odoo import api, models\nimport time\n\n\nclass location_accommodation(models.AbstractModel):\n _name = 'report.sg_accommodation.view_location_report'\n\n @api.model\n def get_companies(self):\n company_list = []\n self.td_list = []\n comp_ids = self.env['res.company'].search([('tenant', '=', True)])\n for comp in comp_ids:\n company_list.append(comp.company_code)\n if company_list:\n company_list.sort()\n no_of_td = company_list\n for td in range(0, len(no_of_td)):\n self.td_list.append(td)\n return company_list\n\n @api.multi\n def render_html(self, docids, data=None):\n report = self.env['report']._get_report_from_name(\n 'sg_accommodation.view_location_report')\n records = self.env['accommodation.accommodation'].browse(self.ids)\n docargs = {'doc_ids': self.ids, 'doc_model': report.model, 'data':\n data, 'docs': records, 'time': time, 'get_companies': self.\n get_companies}\n return self.env['report'].render(\n 'sg_accommodation.view_location_report', docargs)\n",
"step-5": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2011-Today Serpent Consulting Services Pvt.Ltd. (<http://www.serpentcs.com>).\n# Copyright (C) 2004 OpenERP SA (<http://www.openerp.com>)\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>\n#\n##############################################################################\nfrom odoo import api, models\nimport time\n\n\nclass location_accommodation(models.AbstractModel):\n _name = 'report.sg_accommodation.view_location_report'\n\n @api.model\n def get_companies(self):\n company_list=[]\n self.td_list = []\n comp_ids=self.env['res.company'].search([('tenant', '=', True)])\n for comp in comp_ids:\n company_list.append(comp.company_code)\n if company_list:\n company_list.sort()\n no_of_td=company_list\n for td in range(0,len(no_of_td)):\n self.td_list.append(td)\n return company_list\n\n @api.multi\n def render_html(self, docids, data=None):\n report = self.env['report']._get_report_from_name('sg_accommodation.view_location_report')\n records = self.env['accommodation.accommodation'].browse(self.ids)\n docargs = {'doc_ids' : self.ids,\n 'doc_model' : report.model,\n 'data' : data,\n 'docs' : records,\n 'time' : time,\n 'get_companies' : self.get_companies}\n return 
self.env['report'].render('sg_accommodation.view_location_report', docargs)\n \n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for string in inputList:
hasDoubleDupes = False
hasTripleDupes = False
for char in string:
numRepeatsChar = string.count(char)
if numRepeatsChar == 2 and not hasDoubleDupes:
doubleDupes += 1
hasDoubleDupes = True
elif numRepeatsChar == 3 and not hasTripleDupes:
tripleDupes += 1
hasTripleDupes = True
elif hasDoubleDupes and hasTripleDupes:
break
print(doubleDupes)
print(tripleDupes)
<|reserved_special_token_0|>
print('Checksum: ' + str(checkSum))
print('%s seconds' % (time.time() - startTime))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
inputStr = """crruafyzloguvxwctqmphenbkd
srcjafyzlcguvrwctqmphenbkd
srijafyzlogbpxwctgmphenbkd
zrijafyzloguvxrctqmphendkd
srijabyzloguvowcqqmphenbkd
srijafyzsoguvxwctbmpienbkd
srirtfyzlognvxwctqmphenbkd
srijafyzloguvxwctgmphenbmq
senjafyzloguvxectqmphenbkd
srijafyeloguvxwwtqmphembkd
srijafyzlogurxtctqmpkenbkd
srijafyzlkguvxictqhphenbkd
srijafgzlogunxwctqophenbkd
shijabyzloguvxwctqmqhenbkd
srjoafyzloguvxwctqmphenbwd
srijafyhloguvxwmtqmphenkkd
srijadyzlogwvxwctqmphenbed
brijafyzloguvmwctqmphenhkd
smijafyzlhguvxwctqmphjnbkd
sriqafvzloguvxwctqmpheebkd
srijafyzloguvxwisqmpuenbkd
mrijakyuloguvxwctqmphenbkd
srnfafyzloguvxwctqmphgnbkd
srijadyzloguvxwhfqmphenbkd
srijafhzloguvxwctdmlhenbkd
srijafyzloguvxwcsqmphykbkd
srijafyzlogwvxwatqmphhnbkd
srijafyzlozqvxwctqmphenbku
srijafyzloguvxwcbamphenbgd
srijafyzlfguvxwctqmphzybkd
srijafyzloguqxwetqmphenkkd
srijafyylogubxwttqmphenbkd
srijafyzloguvxzctadphenbkd
srijafyzloguoxwhtqmchenbkd
srijafyzloguvxwcvqmzhenbko
srijnfyzloguvxwctqmchenjkd
srijaryzloggvxwctqzphenbkd
srijafhzleguvxwcxqmphenbkd
ssijafyzllguvxfctqmphenbkd
srijafyzloguvxdctqmfhenbcd
srijafyzloguvxfctqmplynbkd
srijaftzlogavxwcrqmphenbkd
sriwaoyzloguvxwctqmphenbtd
srijahyzlogunxwctqmphenbvd
srjjafyzloguzxwctumphenbkd
nrijafyzlxguvxwctqmphanbkd
srijafezlqguyxwctqmphenbkd
srijafygloguvxwjtqcphenbkd
erijafyzloguvxoctqmnhenbkd
ssijafyzllguvxwbtqmphenbkd
sriaafyzloguvxwctqqphenbkv
frijafyzloguvswctwmphenbkd
srijafyzyogkvxwctqmprenbkd
syijafyzuoguvxwctqmkhenbkd
srijafyzloganxwctqmphenbkf
srijafyzloguvxwftqmxhenbkq
srijafyflogxvxwctqmghenbkd
srijafyzsoguvxwctqmpjenwkd
srujafylloguvxwctqmphenckd
srijafyzlpzuvxwctqmphenbud
srijafyzlogfvxwctqmhhenbwd
srijafjzlogusxwctqmphepbkd
srijlfyzloguvxwctqfphenzkd
srijafyzlogwvxwctqyphenbqd
srijafyzloluvxwctqtphenukd
srizafyzlowuvxwctqmphqnbkd
sritafkzlkguvxwctqmphenbkd
sbijafdzloguvxgctqmphenbkd
crijafyeloguvxwctqmpsenbkd
srijafyvlogulxwctqmphenbkk
srijafyologuvxwctqmehegbkd
siijafyzloguvxwctjmphenbmd
srijafyzlupuvxwctqmpheabkd
srijafyzlogumxwctqqphanbkd
srijxfyzlogujxwcqqmphenbkd
irijafizeoguvxwctqmphenbkd
sgijafyzloguvtwctqmpfenbkd
srijzfyzloguvmwctnmphenbkd
srijafyzwohuvxwctqmthenbkd
srijafyzlhguvxoctqwphenbkd
srgjafyplogxvxwctqmphenbkd
srijafyqlogovxwctqzphenbkd
srijafjzloguvlnvtqmphenbkd
srijafyzooguvxwctqmphenvud
srijafyzgoguvxwctumphgnbkd
srijaffzloguvxwdqqmphenbkd
srijafyzlogugxwctqxphenbkr
srijafyzlogutxwctqmmcenbkd
srifafyzlhguwxwctqmphenbkd
mrimajyzloguvxwctqmphenbkd
sriyafyzloguvxwcthmphejbkd
srieakyzlokuvxwctqmphenbkd
srisafyzloguhxwctqmphecbkd
srijanyzloguvxcctqmxhenbkd
srijafyzypguvxwctqmqhenbkd
sryjtfyzlvguvxwctqmphenbkd
srijafyzlsguvxwctqmqfenbkd
srijafyzlogudxwbtqwphenbkd
srijysyzloguvxwctqmpvenbkd
srijafyzloggvxwjtqmphegbkd
srijgfyzloguvxwctqmbhdnbkd
ssijufyzloguvawctqmphenbkd
skojafyzloguvxwctqmphenbnd
srijafylloguvxwcqqmpienbkd
trioafyzloguvqwctqmphenbkd
srijafydloguvxwctqmpzjnbkd
saijafvzloguvxwcqqmphenbkd
srhjapyzloguvxwctqmbhenbkd
srijafyzlfguvxwcsqmpwenbkd
shijafyzboguvxwctqmphenbmd
srizafysloguvxwrtqmphenbkd
srijafyzloguvxwciqmwhenbkj
qrijafyzloduvxwctqmphenbko
srijefyuloguvxwctqmphenbed
srijafyzlobuvxwctqmphenhbd
srijafyzloxuvxwctqmpheabkq
srijafyzloguvrwctqmghenkkd
sfisafywloguvxwctqmphenbkd
srgjafyzlogurxwctqmphenbkp
srijafhzloguvxwcjqmphenhkd
srijafyylogufxwrtqmphenbkd
srijafyzvoguvxwzkqmphenbkd
sqijafyzloguvxwctqmpheqbxd
srijafyvloguvxwctqzpherbkd
srijufyzloguvxlcsqmphenbkd
srijafykloguvxlccqmphenbkd
srijafyzloguexwcrqmphenzkd
sridifyzloguyxwctqmphenbkd
srijafyzlogfvxwctqlphenbkl
srijafyzlodqdxwctqmphenbkd
srijafyzloruvxactqmphenekd
grijafyzloguvxpctmmphenbkd
srsjakyzloguvxwctqmphvnbkd
srikafyvloguvxwrtqmphenbkd
srijafyzloguvxwctqjpserbkd
jrijafyzloguvxwctqmpgesbkd
swijafyzluguvxwctqmfhenbkd
srijanynlogovxwctqmphenbkd
jrijafyzloguvxwctymphrnbkd
srinafyzloguvewctqmphenbzd
srijakyzloguvxwctqmphcnbka
srijafyhlobuvxwctqmphenbka
srijafyzcogusxwctqmphwnbkd
srijavyzlosuvxwctqmphjnbkd
orijafyzxoguvxwcnqmphenbkd
srijafyzlogcvxwvtqmthenbkd
srijapyzloauvxwctqmphenvkd
srijaflzloguhxwctqmphenbwd
smijafyzlonuvxwctqmphenbkw
jrijafyzloguvxwclqmnhenbkd
srijaqyzloguvqwctqmphenskd
srijasyzloguvxwctqmvhenbku
crijtfyzloguvxwctqmthenbkd
srrkafyzvoguvxwctqmphenbkd
srijatyzloguvewctqmphenbld
srfjafyyloguvnwctqmphenbkd
srijafyzloguvxwctqjpbenbkt
hrijafyzooguvxwctqmphenbld
srijafbzlogscxwctqmphenbkd
srinafyzlogxvxwctqqphenbkd
slijafyzloglvxwctqmphenbdd
srijafyzlogjvxwcsqmphenbld
sryjcfyzloguvewctqmphenbkd
srijafyzloguexwctqmohknbkd
jaijafyzlogevxwctqmphenbkd
srijafbzlogavxwctqmphenbki
srijafozlogpvxwctqmphgnbkd
srijdfyzloguvxwczqmphenbkm
srijafyzlobuvxwctqmphxndkd
mrijifyzlhguvxwctqmphenbkd
srijafyzloguvxbctumphjnbkd
srijafyzloyuvxwptqmphlnbkd
arijafyzloguvxwcsqmohenbkd
srijaftzioguvxwttqmphenbkd
srijafyzlqsuvxwctqmphxnbkd
srijafyzioguvxwctqnphetbkd
prijafbzloguvxdctqmphenbkd
srijaeyzlnguvxwmtqmphenbkd
srijofyzloguvqwctqmphonbkd
srixaryzpoguvxwctqmphenbkd
srijafyzlowuvxwcwhmphenbkd
srijafydloguvxwctqmptenikd
srijqfyzlogtvfwctqmphenbkd
srijafyzloguvxlctqmpvenbgd
srijafyzlbguvxwjtqgphenbkd
srijafyzlohuqxwctqmphenbka
srijafyzroguvxictqmphynbkd
srijafyzloguvxdctjmphenjkd
srijaoczloguvxwctqmphenbjd
srajafhzloguvxwctqmphenbke
srijofyzloduvxwctqmphanbkd
srijafytloguvxwmtnmphenbkd
srijafyzuoguvxwceqmpgenbkd
rrijafyzloyuvxwctqmphlnbkd
srljafyzloguvxictqmohenbkd
srijafyzlogulxwcrqrphenbkd
srajafyzloguvxwctqmphanbke
srijafyzlhguvxwxtqmpheabkd
sxijafyzloggwxwctqmphenbkd
srijafyultguvxwctqmphinbkd
srijafyzloguvtwctqmfhvnbkd
srijafwzloruvxwctquphenbkd
srbjafyzxoguuxwctqmphenbkd
erijafyzlxguvxbctqmphenbkd
srijagyzlojubxwctqmphenbkd
srijafyzloguvxwdtqmchenakd
srijafkzlogukxwctqiphenbkd
mridafyzloguvxwctqmphenrkd
szqjafyzloguvxwctqmpheibkd
srijahyzloguvxwctcmphenekd
srijafyzloguvxwczpuphenbkd
srijafyzcoguvfwctqmphenbkq
qriiafyzloguvxwctqmpheebkd
srijpfyzloguvxlctqmphenokd
srijzfyzlotuvxwcjqmphenbkd
srinafyqloguvxwctfmphenbkd
srijafyzlogjvxpltqmphenbkd
srijafyzlotuvxwutqmphenbtd
sridafyzloguvxwctqmpyenokd
srxjafyzqogyvxwctqmphenbkd
ssijafyzzoguvxwctqmphenbad
srijafrzloguvxwctqmphekpkd
srijafyzlfgrvxactqmphenbkd
srijafyzroguvxwttqmphekbkd
srijefyzloguvxwctqmpqenbrd
srijefycloguvxwctqmchenbkd
srzjafyzloguvxwcqqmphanbkd
srijauyzlhguvxwctqmphenbgd
srijafyzloguvmwvnqmphenbkd
srihafyzloguvlwotqmphenbkd
srigafyzloguvxwctqmphennsd
sriuafzzloguvxwcuqmphenbkd
srijavuzllguvxwctqmphenbkd
srijafjzloguvlnctqmphenbkd
lrirafyzloguvxwctqmphenbld
soijarxzloguvxwctqmphenbkd
srijapyzlnguvxwctqmdhenbkd
srijafyzkogujxmctqmphenbkd
srijafuzloguvxwcsqvphenbkd
srijagyzzoguvxwctqmpvenbkd
srijafyzlovuvxwctqmrhenbxd
srijafyzqoguvxwctwmpienbkd
sxijafyzloguvxwutqmphenlkd
srijafyzlhgzvxwctqmphqnbkd
srijajyzloguvxwcbwmphenbkd
srijazyzloguvxwhtqmphenbkx
srgjafyzloguvvwctqmphdnbkd
rrivafyzloguvxjctqmphenbkd
srijifyzdoguvxwctqmphenbka
hrijafyzloguvxectqmpheybkd"""
# Advent of Code 2018, day 2 part 1.
# Checksum = (number of box ids containing some letter exactly twice)
#          * (number of box ids containing some letter exactly three times).
startTime = time.time()

inputList = inputStr.splitlines()


def _dupe_counts(box_ids):
    """Return (doubles, triples) for the given iterable of id strings.

    doubles: how many ids have at least one character occurring exactly 2 times.
    triples: how many ids have at least one character occurring exactly 3 times.
    Each id contributes at most once to each bucket.
    """
    doubles = 0
    triples = 0
    for box_id in box_ids:
        # Count each distinct character once; iterating set(box_id) avoids
        # the original's O(len^2) behavior of calling str.count for every
        # character position, duplicates included.
        counts = {box_id.count(ch) for ch in set(box_id)}
        if 2 in counts:
            doubles += 1
        if 3 in counts:
            triples += 1
    return doubles, triples


doubleDupes, tripleDupes = _dupe_counts(inputList)
# Bug fix: the original printed the running totals inside the per-id loop,
# emitting two lines per input id; report the final counts once instead.
print(doubleDupes)
print(tripleDupes)
checkSum = doubleDupes * tripleDupes
print('Checksum: ' + str(checkSum))
print('%s seconds' % (time.time() - startTime))
<|reserved_special_token_1|>
import time
inputStr = """crruafyzloguvxwctqmphenbkd
srcjafyzlcguvrwctqmphenbkd
srijafyzlogbpxwctgmphenbkd
zrijafyzloguvxrctqmphendkd
srijabyzloguvowcqqmphenbkd
srijafyzsoguvxwctbmpienbkd
srirtfyzlognvxwctqmphenbkd
srijafyzloguvxwctgmphenbmq
senjafyzloguvxectqmphenbkd
srijafyeloguvxwwtqmphembkd
srijafyzlogurxtctqmpkenbkd
srijafyzlkguvxictqhphenbkd
srijafgzlogunxwctqophenbkd
shijabyzloguvxwctqmqhenbkd
srjoafyzloguvxwctqmphenbwd
srijafyhloguvxwmtqmphenkkd
srijadyzlogwvxwctqmphenbed
brijafyzloguvmwctqmphenhkd
smijafyzlhguvxwctqmphjnbkd
sriqafvzloguvxwctqmpheebkd
srijafyzloguvxwisqmpuenbkd
mrijakyuloguvxwctqmphenbkd
srnfafyzloguvxwctqmphgnbkd
srijadyzloguvxwhfqmphenbkd
srijafhzloguvxwctdmlhenbkd
srijafyzloguvxwcsqmphykbkd
srijafyzlogwvxwatqmphhnbkd
srijafyzlozqvxwctqmphenbku
srijafyzloguvxwcbamphenbgd
srijafyzlfguvxwctqmphzybkd
srijafyzloguqxwetqmphenkkd
srijafyylogubxwttqmphenbkd
srijafyzloguvxzctadphenbkd
srijafyzloguoxwhtqmchenbkd
srijafyzloguvxwcvqmzhenbko
srijnfyzloguvxwctqmchenjkd
srijaryzloggvxwctqzphenbkd
srijafhzleguvxwcxqmphenbkd
ssijafyzllguvxfctqmphenbkd
srijafyzloguvxdctqmfhenbcd
srijafyzloguvxfctqmplynbkd
srijaftzlogavxwcrqmphenbkd
sriwaoyzloguvxwctqmphenbtd
srijahyzlogunxwctqmphenbvd
srjjafyzloguzxwctumphenbkd
nrijafyzlxguvxwctqmphanbkd
srijafezlqguyxwctqmphenbkd
srijafygloguvxwjtqcphenbkd
erijafyzloguvxoctqmnhenbkd
ssijafyzllguvxwbtqmphenbkd
sriaafyzloguvxwctqqphenbkv
frijafyzloguvswctwmphenbkd
srijafyzyogkvxwctqmprenbkd
syijafyzuoguvxwctqmkhenbkd
srijafyzloganxwctqmphenbkf
srijafyzloguvxwftqmxhenbkq
srijafyflogxvxwctqmghenbkd
srijafyzsoguvxwctqmpjenwkd
srujafylloguvxwctqmphenckd
srijafyzlpzuvxwctqmphenbud
srijafyzlogfvxwctqmhhenbwd
srijafjzlogusxwctqmphepbkd
srijlfyzloguvxwctqfphenzkd
srijafyzlogwvxwctqyphenbqd
srijafyzloluvxwctqtphenukd
srizafyzlowuvxwctqmphqnbkd
sritafkzlkguvxwctqmphenbkd
sbijafdzloguvxgctqmphenbkd
crijafyeloguvxwctqmpsenbkd
srijafyvlogulxwctqmphenbkk
srijafyologuvxwctqmehegbkd
siijafyzloguvxwctjmphenbmd
srijafyzlupuvxwctqmpheabkd
srijafyzlogumxwctqqphanbkd
srijxfyzlogujxwcqqmphenbkd
irijafizeoguvxwctqmphenbkd
sgijafyzloguvtwctqmpfenbkd
srijzfyzloguvmwctnmphenbkd
srijafyzwohuvxwctqmthenbkd
srijafyzlhguvxoctqwphenbkd
srgjafyplogxvxwctqmphenbkd
srijafyqlogovxwctqzphenbkd
srijafjzloguvlnvtqmphenbkd
srijafyzooguvxwctqmphenvud
srijafyzgoguvxwctumphgnbkd
srijaffzloguvxwdqqmphenbkd
srijafyzlogugxwctqxphenbkr
srijafyzlogutxwctqmmcenbkd
srifafyzlhguwxwctqmphenbkd
mrimajyzloguvxwctqmphenbkd
sriyafyzloguvxwcthmphejbkd
srieakyzlokuvxwctqmphenbkd
srisafyzloguhxwctqmphecbkd
srijanyzloguvxcctqmxhenbkd
srijafyzypguvxwctqmqhenbkd
sryjtfyzlvguvxwctqmphenbkd
srijafyzlsguvxwctqmqfenbkd
srijafyzlogudxwbtqwphenbkd
srijysyzloguvxwctqmpvenbkd
srijafyzloggvxwjtqmphegbkd
srijgfyzloguvxwctqmbhdnbkd
ssijufyzloguvawctqmphenbkd
skojafyzloguvxwctqmphenbnd
srijafylloguvxwcqqmpienbkd
trioafyzloguvqwctqmphenbkd
srijafydloguvxwctqmpzjnbkd
saijafvzloguvxwcqqmphenbkd
srhjapyzloguvxwctqmbhenbkd
srijafyzlfguvxwcsqmpwenbkd
shijafyzboguvxwctqmphenbmd
srizafysloguvxwrtqmphenbkd
srijafyzloguvxwciqmwhenbkj
qrijafyzloduvxwctqmphenbko
srijefyuloguvxwctqmphenbed
srijafyzlobuvxwctqmphenhbd
srijafyzloxuvxwctqmpheabkq
srijafyzloguvrwctqmghenkkd
sfisafywloguvxwctqmphenbkd
srgjafyzlogurxwctqmphenbkp
srijafhzloguvxwcjqmphenhkd
srijafyylogufxwrtqmphenbkd
srijafyzvoguvxwzkqmphenbkd
sqijafyzloguvxwctqmpheqbxd
srijafyvloguvxwctqzpherbkd
srijufyzloguvxlcsqmphenbkd
srijafykloguvxlccqmphenbkd
srijafyzloguexwcrqmphenzkd
sridifyzloguyxwctqmphenbkd
srijafyzlogfvxwctqlphenbkl
srijafyzlodqdxwctqmphenbkd
srijafyzloruvxactqmphenekd
grijafyzloguvxpctmmphenbkd
srsjakyzloguvxwctqmphvnbkd
srikafyvloguvxwrtqmphenbkd
srijafyzloguvxwctqjpserbkd
jrijafyzloguvxwctqmpgesbkd
swijafyzluguvxwctqmfhenbkd
srijanynlogovxwctqmphenbkd
jrijafyzloguvxwctymphrnbkd
srinafyzloguvewctqmphenbzd
srijakyzloguvxwctqmphcnbka
srijafyhlobuvxwctqmphenbka
srijafyzcogusxwctqmphwnbkd
srijavyzlosuvxwctqmphjnbkd
orijafyzxoguvxwcnqmphenbkd
srijafyzlogcvxwvtqmthenbkd
srijapyzloauvxwctqmphenvkd
srijaflzloguhxwctqmphenbwd
smijafyzlonuvxwctqmphenbkw
jrijafyzloguvxwclqmnhenbkd
srijaqyzloguvqwctqmphenskd
srijasyzloguvxwctqmvhenbku
crijtfyzloguvxwctqmthenbkd
srrkafyzvoguvxwctqmphenbkd
srijatyzloguvewctqmphenbld
srfjafyyloguvnwctqmphenbkd
srijafyzloguvxwctqjpbenbkt
hrijafyzooguvxwctqmphenbld
srijafbzlogscxwctqmphenbkd
srinafyzlogxvxwctqqphenbkd
slijafyzloglvxwctqmphenbdd
srijafyzlogjvxwcsqmphenbld
sryjcfyzloguvewctqmphenbkd
srijafyzloguexwctqmohknbkd
jaijafyzlogevxwctqmphenbkd
srijafbzlogavxwctqmphenbki
srijafozlogpvxwctqmphgnbkd
srijdfyzloguvxwczqmphenbkm
srijafyzlobuvxwctqmphxndkd
mrijifyzlhguvxwctqmphenbkd
srijafyzloguvxbctumphjnbkd
srijafyzloyuvxwptqmphlnbkd
arijafyzloguvxwcsqmohenbkd
srijaftzioguvxwttqmphenbkd
srijafyzlqsuvxwctqmphxnbkd
srijafyzioguvxwctqnphetbkd
prijafbzloguvxdctqmphenbkd
srijaeyzlnguvxwmtqmphenbkd
srijofyzloguvqwctqmphonbkd
srixaryzpoguvxwctqmphenbkd
srijafyzlowuvxwcwhmphenbkd
srijafydloguvxwctqmptenikd
srijqfyzlogtvfwctqmphenbkd
srijafyzloguvxlctqmpvenbgd
srijafyzlbguvxwjtqgphenbkd
srijafyzlohuqxwctqmphenbka
srijafyzroguvxictqmphynbkd
srijafyzloguvxdctjmphenjkd
srijaoczloguvxwctqmphenbjd
srajafhzloguvxwctqmphenbke
srijofyzloduvxwctqmphanbkd
srijafytloguvxwmtnmphenbkd
srijafyzuoguvxwceqmpgenbkd
rrijafyzloyuvxwctqmphlnbkd
srljafyzloguvxictqmohenbkd
srijafyzlogulxwcrqrphenbkd
srajafyzloguvxwctqmphanbke
srijafyzlhguvxwxtqmpheabkd
sxijafyzloggwxwctqmphenbkd
srijafyultguvxwctqmphinbkd
srijafyzloguvtwctqmfhvnbkd
srijafwzloruvxwctquphenbkd
srbjafyzxoguuxwctqmphenbkd
erijafyzlxguvxbctqmphenbkd
srijagyzlojubxwctqmphenbkd
srijafyzloguvxwdtqmchenakd
srijafkzlogukxwctqiphenbkd
mridafyzloguvxwctqmphenrkd
szqjafyzloguvxwctqmpheibkd
srijahyzloguvxwctcmphenekd
srijafyzloguvxwczpuphenbkd
srijafyzcoguvfwctqmphenbkq
qriiafyzloguvxwctqmpheebkd
srijpfyzloguvxlctqmphenokd
srijzfyzlotuvxwcjqmphenbkd
srinafyqloguvxwctfmphenbkd
srijafyzlogjvxpltqmphenbkd
srijafyzlotuvxwutqmphenbtd
sridafyzloguvxwctqmpyenokd
srxjafyzqogyvxwctqmphenbkd
ssijafyzzoguvxwctqmphenbad
srijafrzloguvxwctqmphekpkd
srijafyzlfgrvxactqmphenbkd
srijafyzroguvxwttqmphekbkd
srijefyzloguvxwctqmpqenbrd
srijefycloguvxwctqmchenbkd
srzjafyzloguvxwcqqmphanbkd
srijauyzlhguvxwctqmphenbgd
srijafyzloguvmwvnqmphenbkd
srihafyzloguvlwotqmphenbkd
srigafyzloguvxwctqmphennsd
sriuafzzloguvxwcuqmphenbkd
srijavuzllguvxwctqmphenbkd
srijafjzloguvlnctqmphenbkd
lrirafyzloguvxwctqmphenbld
soijarxzloguvxwctqmphenbkd
srijapyzlnguvxwctqmdhenbkd
srijafyzkogujxmctqmphenbkd
srijafuzloguvxwcsqvphenbkd
srijagyzzoguvxwctqmpvenbkd
srijafyzlovuvxwctqmrhenbxd
srijafyzqoguvxwctwmpienbkd
sxijafyzloguvxwutqmphenlkd
srijafyzlhgzvxwctqmphqnbkd
srijajyzloguvxwcbwmphenbkd
srijazyzloguvxwhtqmphenbkx
srgjafyzloguvvwctqmphdnbkd
rrivafyzloguvxjctqmphenbkd
srijifyzdoguvxwctqmphenbka
hrijafyzloguvxectqmpheybkd"""
# Advent of Code 2018, day 2 part 1.
# Checksum = (number of box ids containing some letter exactly twice)
#          * (number of box ids containing some letter exactly three times).
startTime = time.time()

inputList = inputStr.splitlines()


def _dupe_counts(box_ids):
    """Return (doubles, triples) for the given iterable of id strings.

    doubles: how many ids have at least one character occurring exactly 2 times.
    triples: how many ids have at least one character occurring exactly 3 times.
    Each id contributes at most once to each bucket.
    """
    doubles = 0
    triples = 0
    for box_id in box_ids:
        # Count each distinct character once; iterating set(box_id) avoids
        # the original's O(len^2) behavior of calling str.count for every
        # character position, duplicates included.
        counts = {box_id.count(ch) for ch in set(box_id)}
        if 2 in counts:
            doubles += 1
        if 3 in counts:
            triples += 1
    return doubles, triples


doubleDupes, tripleDupes = _dupe_counts(inputList)
# Bug fix: the original printed the running totals inside the per-id loop,
# emitting two lines per input id; report the final counts once instead.
print(doubleDupes)
print(tripleDupes)
checkSum = doubleDupes * tripleDupes
print('Checksum: ' + str(checkSum))
print('%s seconds' % (time.time() - startTime))
<|reserved_special_token_1|>
import time
inputStr = """crruafyzloguvxwctqmphenbkd
srcjafyzlcguvrwctqmphenbkd
srijafyzlogbpxwctgmphenbkd
zrijafyzloguvxrctqmphendkd
srijabyzloguvowcqqmphenbkd
srijafyzsoguvxwctbmpienbkd
srirtfyzlognvxwctqmphenbkd
srijafyzloguvxwctgmphenbmq
senjafyzloguvxectqmphenbkd
srijafyeloguvxwwtqmphembkd
srijafyzlogurxtctqmpkenbkd
srijafyzlkguvxictqhphenbkd
srijafgzlogunxwctqophenbkd
shijabyzloguvxwctqmqhenbkd
srjoafyzloguvxwctqmphenbwd
srijafyhloguvxwmtqmphenkkd
srijadyzlogwvxwctqmphenbed
brijafyzloguvmwctqmphenhkd
smijafyzlhguvxwctqmphjnbkd
sriqafvzloguvxwctqmpheebkd
srijafyzloguvxwisqmpuenbkd
mrijakyuloguvxwctqmphenbkd
srnfafyzloguvxwctqmphgnbkd
srijadyzloguvxwhfqmphenbkd
srijafhzloguvxwctdmlhenbkd
srijafyzloguvxwcsqmphykbkd
srijafyzlogwvxwatqmphhnbkd
srijafyzlozqvxwctqmphenbku
srijafyzloguvxwcbamphenbgd
srijafyzlfguvxwctqmphzybkd
srijafyzloguqxwetqmphenkkd
srijafyylogubxwttqmphenbkd
srijafyzloguvxzctadphenbkd
srijafyzloguoxwhtqmchenbkd
srijafyzloguvxwcvqmzhenbko
srijnfyzloguvxwctqmchenjkd
srijaryzloggvxwctqzphenbkd
srijafhzleguvxwcxqmphenbkd
ssijafyzllguvxfctqmphenbkd
srijafyzloguvxdctqmfhenbcd
srijafyzloguvxfctqmplynbkd
srijaftzlogavxwcrqmphenbkd
sriwaoyzloguvxwctqmphenbtd
srijahyzlogunxwctqmphenbvd
srjjafyzloguzxwctumphenbkd
nrijafyzlxguvxwctqmphanbkd
srijafezlqguyxwctqmphenbkd
srijafygloguvxwjtqcphenbkd
erijafyzloguvxoctqmnhenbkd
ssijafyzllguvxwbtqmphenbkd
sriaafyzloguvxwctqqphenbkv
frijafyzloguvswctwmphenbkd
srijafyzyogkvxwctqmprenbkd
syijafyzuoguvxwctqmkhenbkd
srijafyzloganxwctqmphenbkf
srijafyzloguvxwftqmxhenbkq
srijafyflogxvxwctqmghenbkd
srijafyzsoguvxwctqmpjenwkd
srujafylloguvxwctqmphenckd
srijafyzlpzuvxwctqmphenbud
srijafyzlogfvxwctqmhhenbwd
srijafjzlogusxwctqmphepbkd
srijlfyzloguvxwctqfphenzkd
srijafyzlogwvxwctqyphenbqd
srijafyzloluvxwctqtphenukd
srizafyzlowuvxwctqmphqnbkd
sritafkzlkguvxwctqmphenbkd
sbijafdzloguvxgctqmphenbkd
crijafyeloguvxwctqmpsenbkd
srijafyvlogulxwctqmphenbkk
srijafyologuvxwctqmehegbkd
siijafyzloguvxwctjmphenbmd
srijafyzlupuvxwctqmpheabkd
srijafyzlogumxwctqqphanbkd
srijxfyzlogujxwcqqmphenbkd
irijafizeoguvxwctqmphenbkd
sgijafyzloguvtwctqmpfenbkd
srijzfyzloguvmwctnmphenbkd
srijafyzwohuvxwctqmthenbkd
srijafyzlhguvxoctqwphenbkd
srgjafyplogxvxwctqmphenbkd
srijafyqlogovxwctqzphenbkd
srijafjzloguvlnvtqmphenbkd
srijafyzooguvxwctqmphenvud
srijafyzgoguvxwctumphgnbkd
srijaffzloguvxwdqqmphenbkd
srijafyzlogugxwctqxphenbkr
srijafyzlogutxwctqmmcenbkd
srifafyzlhguwxwctqmphenbkd
mrimajyzloguvxwctqmphenbkd
sriyafyzloguvxwcthmphejbkd
srieakyzlokuvxwctqmphenbkd
srisafyzloguhxwctqmphecbkd
srijanyzloguvxcctqmxhenbkd
srijafyzypguvxwctqmqhenbkd
sryjtfyzlvguvxwctqmphenbkd
srijafyzlsguvxwctqmqfenbkd
srijafyzlogudxwbtqwphenbkd
srijysyzloguvxwctqmpvenbkd
srijafyzloggvxwjtqmphegbkd
srijgfyzloguvxwctqmbhdnbkd
ssijufyzloguvawctqmphenbkd
skojafyzloguvxwctqmphenbnd
srijafylloguvxwcqqmpienbkd
trioafyzloguvqwctqmphenbkd
srijafydloguvxwctqmpzjnbkd
saijafvzloguvxwcqqmphenbkd
srhjapyzloguvxwctqmbhenbkd
srijafyzlfguvxwcsqmpwenbkd
shijafyzboguvxwctqmphenbmd
srizafysloguvxwrtqmphenbkd
srijafyzloguvxwciqmwhenbkj
qrijafyzloduvxwctqmphenbko
srijefyuloguvxwctqmphenbed
srijafyzlobuvxwctqmphenhbd
srijafyzloxuvxwctqmpheabkq
srijafyzloguvrwctqmghenkkd
sfisafywloguvxwctqmphenbkd
srgjafyzlogurxwctqmphenbkp
srijafhzloguvxwcjqmphenhkd
srijafyylogufxwrtqmphenbkd
srijafyzvoguvxwzkqmphenbkd
sqijafyzloguvxwctqmpheqbxd
srijafyvloguvxwctqzpherbkd
srijufyzloguvxlcsqmphenbkd
srijafykloguvxlccqmphenbkd
srijafyzloguexwcrqmphenzkd
sridifyzloguyxwctqmphenbkd
srijafyzlogfvxwctqlphenbkl
srijafyzlodqdxwctqmphenbkd
srijafyzloruvxactqmphenekd
grijafyzloguvxpctmmphenbkd
srsjakyzloguvxwctqmphvnbkd
srikafyvloguvxwrtqmphenbkd
srijafyzloguvxwctqjpserbkd
jrijafyzloguvxwctqmpgesbkd
swijafyzluguvxwctqmfhenbkd
srijanynlogovxwctqmphenbkd
jrijafyzloguvxwctymphrnbkd
srinafyzloguvewctqmphenbzd
srijakyzloguvxwctqmphcnbka
srijafyhlobuvxwctqmphenbka
srijafyzcogusxwctqmphwnbkd
srijavyzlosuvxwctqmphjnbkd
orijafyzxoguvxwcnqmphenbkd
srijafyzlogcvxwvtqmthenbkd
srijapyzloauvxwctqmphenvkd
srijaflzloguhxwctqmphenbwd
smijafyzlonuvxwctqmphenbkw
jrijafyzloguvxwclqmnhenbkd
srijaqyzloguvqwctqmphenskd
srijasyzloguvxwctqmvhenbku
crijtfyzloguvxwctqmthenbkd
srrkafyzvoguvxwctqmphenbkd
srijatyzloguvewctqmphenbld
srfjafyyloguvnwctqmphenbkd
srijafyzloguvxwctqjpbenbkt
hrijafyzooguvxwctqmphenbld
srijafbzlogscxwctqmphenbkd
srinafyzlogxvxwctqqphenbkd
slijafyzloglvxwctqmphenbdd
srijafyzlogjvxwcsqmphenbld
sryjcfyzloguvewctqmphenbkd
srijafyzloguexwctqmohknbkd
jaijafyzlogevxwctqmphenbkd
srijafbzlogavxwctqmphenbki
srijafozlogpvxwctqmphgnbkd
srijdfyzloguvxwczqmphenbkm
srijafyzlobuvxwctqmphxndkd
mrijifyzlhguvxwctqmphenbkd
srijafyzloguvxbctumphjnbkd
srijafyzloyuvxwptqmphlnbkd
arijafyzloguvxwcsqmohenbkd
srijaftzioguvxwttqmphenbkd
srijafyzlqsuvxwctqmphxnbkd
srijafyzioguvxwctqnphetbkd
prijafbzloguvxdctqmphenbkd
srijaeyzlnguvxwmtqmphenbkd
srijofyzloguvqwctqmphonbkd
srixaryzpoguvxwctqmphenbkd
srijafyzlowuvxwcwhmphenbkd
srijafydloguvxwctqmptenikd
srijqfyzlogtvfwctqmphenbkd
srijafyzloguvxlctqmpvenbgd
srijafyzlbguvxwjtqgphenbkd
srijafyzlohuqxwctqmphenbka
srijafyzroguvxictqmphynbkd
srijafyzloguvxdctjmphenjkd
srijaoczloguvxwctqmphenbjd
srajafhzloguvxwctqmphenbke
srijofyzloduvxwctqmphanbkd
srijafytloguvxwmtnmphenbkd
srijafyzuoguvxwceqmpgenbkd
rrijafyzloyuvxwctqmphlnbkd
srljafyzloguvxictqmohenbkd
srijafyzlogulxwcrqrphenbkd
srajafyzloguvxwctqmphanbke
srijafyzlhguvxwxtqmpheabkd
sxijafyzloggwxwctqmphenbkd
srijafyultguvxwctqmphinbkd
srijafyzloguvtwctqmfhvnbkd
srijafwzloruvxwctquphenbkd
srbjafyzxoguuxwctqmphenbkd
erijafyzlxguvxbctqmphenbkd
srijagyzlojubxwctqmphenbkd
srijafyzloguvxwdtqmchenakd
srijafkzlogukxwctqiphenbkd
mridafyzloguvxwctqmphenrkd
szqjafyzloguvxwctqmpheibkd
srijahyzloguvxwctcmphenekd
srijafyzloguvxwczpuphenbkd
srijafyzcoguvfwctqmphenbkq
qriiafyzloguvxwctqmpheebkd
srijpfyzloguvxlctqmphenokd
srijzfyzlotuvxwcjqmphenbkd
srinafyqloguvxwctfmphenbkd
srijafyzlogjvxpltqmphenbkd
srijafyzlotuvxwutqmphenbtd
sridafyzloguvxwctqmpyenokd
srxjafyzqogyvxwctqmphenbkd
ssijafyzzoguvxwctqmphenbad
srijafrzloguvxwctqmphekpkd
srijafyzlfgrvxactqmphenbkd
srijafyzroguvxwttqmphekbkd
srijefyzloguvxwctqmpqenbrd
srijefycloguvxwctqmchenbkd
srzjafyzloguvxwcqqmphanbkd
srijauyzlhguvxwctqmphenbgd
srijafyzloguvmwvnqmphenbkd
srihafyzloguvlwotqmphenbkd
srigafyzloguvxwctqmphennsd
sriuafzzloguvxwcuqmphenbkd
srijavuzllguvxwctqmphenbkd
srijafjzloguvlnctqmphenbkd
lrirafyzloguvxwctqmphenbld
soijarxzloguvxwctqmphenbkd
srijapyzlnguvxwctqmdhenbkd
srijafyzkogujxmctqmphenbkd
srijafuzloguvxwcsqvphenbkd
srijagyzzoguvxwctqmpvenbkd
srijafyzlovuvxwctqmrhenbxd
srijafyzqoguvxwctwmpienbkd
sxijafyzloguvxwutqmphenlkd
srijafyzlhgzvxwctqmphqnbkd
srijajyzloguvxwcbwmphenbkd
srijazyzloguvxwhtqmphenbkx
srgjafyzloguvvwctqmphdnbkd
rrivafyzloguvxjctqmphenbkd
srijifyzdoguvxwctqmphenbka
hrijafyzloguvxectqmpheybkd"""
# Tally, over all box ids, how many contain some letter exactly twice and
# how many contain some letter exactly three times, then print their product
# as the checksum (Advent of Code 2018, day 2 part 1).
startTime = time.time()
inputList = list(map(str, inputStr.splitlines()))
numRepeatsChar = 0
doubleDupes = 0
tripleDupes = 0
for boxId in inputList:
    seenDouble = False
    seenTriple = False
    for ch in boxId:
        occurrences = boxId.count(ch)
        if occurrences == 2 and not seenDouble:
            seenDouble = True
            doubleDupes = doubleDupes + 1
        elif occurrences == 3 and not seenTriple:
            seenTriple = True
            tripleDupes = tripleDupes + 1
        elif seenDouble and seenTriple:
            # Both buckets already credited for this id; remaining
            # characters cannot change anything, so stop early.
            break
    # Running totals after each id (same per-id output as before).
    print(doubleDupes)
    print(tripleDupes)
checkSum = doubleDupes * tripleDupes
print("Checksum: " + str(checkSum))
print("%s seconds" % (time.time() - startTime))
|
flexible
|
{
"blob_id": "9620479e9ac27c1c7833c9a31b9cb18408b8d361",
"index": 4019,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor string in inputList:\n hasDoubleDupes = False\n hasTripleDupes = False\n for char in string:\n numRepeatsChar = string.count(char)\n if numRepeatsChar == 2 and not hasDoubleDupes:\n doubleDupes += 1\n hasDoubleDupes = True\n elif numRepeatsChar == 3 and not hasTripleDupes:\n tripleDupes += 1\n hasTripleDupes = True\n elif hasDoubleDupes and hasTripleDupes:\n break\n print(doubleDupes)\n print(tripleDupes)\n<mask token>\nprint('Checksum: ' + str(checkSum))\nprint('%s seconds' % (time.time() - startTime))\n",
"step-3": "<mask token>\ninputStr = \"\"\"crruafyzloguvxwctqmphenbkd\nsrcjafyzlcguvrwctqmphenbkd\nsrijafyzlogbpxwctgmphenbkd\nzrijafyzloguvxrctqmphendkd\nsrijabyzloguvowcqqmphenbkd\nsrijafyzsoguvxwctbmpienbkd\nsrirtfyzlognvxwctqmphenbkd\nsrijafyzloguvxwctgmphenbmq\nsenjafyzloguvxectqmphenbkd\nsrijafyeloguvxwwtqmphembkd\nsrijafyzlogurxtctqmpkenbkd\nsrijafyzlkguvxictqhphenbkd\nsrijafgzlogunxwctqophenbkd\nshijabyzloguvxwctqmqhenbkd\nsrjoafyzloguvxwctqmphenbwd\nsrijafyhloguvxwmtqmphenkkd\nsrijadyzlogwvxwctqmphenbed\nbrijafyzloguvmwctqmphenhkd\nsmijafyzlhguvxwctqmphjnbkd\nsriqafvzloguvxwctqmpheebkd\nsrijafyzloguvxwisqmpuenbkd\nmrijakyuloguvxwctqmphenbkd\nsrnfafyzloguvxwctqmphgnbkd\nsrijadyzloguvxwhfqmphenbkd\nsrijafhzloguvxwctdmlhenbkd\nsrijafyzloguvxwcsqmphykbkd\nsrijafyzlogwvxwatqmphhnbkd\nsrijafyzlozqvxwctqmphenbku\nsrijafyzloguvxwcbamphenbgd\nsrijafyzlfguvxwctqmphzybkd\nsrijafyzloguqxwetqmphenkkd\nsrijafyylogubxwttqmphenbkd\nsrijafyzloguvxzctadphenbkd\nsrijafyzloguoxwhtqmchenbkd\nsrijafyzloguvxwcvqmzhenbko\nsrijnfyzloguvxwctqmchenjkd\nsrijaryzloggvxwctqzphenbkd\nsrijafhzleguvxwcxqmphenbkd\nssijafyzllguvxfctqmphenbkd\nsrijafyzloguvxdctqmfhenbcd\nsrijafyzloguvxfctqmplynbkd\nsrijaftzlogavxwcrqmphenbkd\nsriwaoyzloguvxwctqmphenbtd\nsrijahyzlogunxwctqmphenbvd\nsrjjafyzloguzxwctumphenbkd\nnrijafyzlxguvxwctqmphanbkd\nsrijafezlqguyxwctqmphenbkd\nsrijafygloguvxwjtqcphenbkd\nerijafyzloguvxoctqmnhenbkd\nssijafyzllguvxwbtqmphenbkd\nsriaafyzloguvxwctqqphenbkv\nfrijafyzloguvswctwmphenbkd\nsrijafyzyogkvxwctqmprenbkd\nsyijafyzuoguvxwctqmkhenbkd\nsrijafyzloganxwctqmphenbkf\nsrijafyzloguvxwftqmxhenbkq\nsrijafyflogxvxwctqmghenbkd\nsrijafyzsoguvxwctqmpjenwkd\nsrujafylloguvxwctqmphenckd\nsrijafyzlpzuvxwctqmphenbud\nsrijafyzlogfvxwctqmhhenbwd\nsrijafjzlogusxwctqmphepbkd\nsrijlfyzloguvxwctqfphenzkd\nsrijafyzlogwvxwctqyphenbqd\nsrijafyzloluvxwctqtphenukd\nsrizafyzlowuvxwctqmphqnbkd\nsritafkzlkguvxwctqmphenbkd\nsbijafdzloguvxgctqmphenbkd\ncrijafyeloguvxwctqmpsenbkd\nsrijafyvlogulxwctqmphenbkk
\nsrijafyologuvxwctqmehegbkd\nsiijafyzloguvxwctjmphenbmd\nsrijafyzlupuvxwctqmpheabkd\nsrijafyzlogumxwctqqphanbkd\nsrijxfyzlogujxwcqqmphenbkd\nirijafizeoguvxwctqmphenbkd\nsgijafyzloguvtwctqmpfenbkd\nsrijzfyzloguvmwctnmphenbkd\nsrijafyzwohuvxwctqmthenbkd\nsrijafyzlhguvxoctqwphenbkd\nsrgjafyplogxvxwctqmphenbkd\nsrijafyqlogovxwctqzphenbkd\nsrijafjzloguvlnvtqmphenbkd\nsrijafyzooguvxwctqmphenvud\nsrijafyzgoguvxwctumphgnbkd\nsrijaffzloguvxwdqqmphenbkd\nsrijafyzlogugxwctqxphenbkr\nsrijafyzlogutxwctqmmcenbkd\nsrifafyzlhguwxwctqmphenbkd\nmrimajyzloguvxwctqmphenbkd\nsriyafyzloguvxwcthmphejbkd\nsrieakyzlokuvxwctqmphenbkd\nsrisafyzloguhxwctqmphecbkd\nsrijanyzloguvxcctqmxhenbkd\nsrijafyzypguvxwctqmqhenbkd\nsryjtfyzlvguvxwctqmphenbkd\nsrijafyzlsguvxwctqmqfenbkd\nsrijafyzlogudxwbtqwphenbkd\nsrijysyzloguvxwctqmpvenbkd\nsrijafyzloggvxwjtqmphegbkd\nsrijgfyzloguvxwctqmbhdnbkd\nssijufyzloguvawctqmphenbkd\nskojafyzloguvxwctqmphenbnd\nsrijafylloguvxwcqqmpienbkd\ntrioafyzloguvqwctqmphenbkd\nsrijafydloguvxwctqmpzjnbkd\nsaijafvzloguvxwcqqmphenbkd\nsrhjapyzloguvxwctqmbhenbkd\nsrijafyzlfguvxwcsqmpwenbkd\nshijafyzboguvxwctqmphenbmd\nsrizafysloguvxwrtqmphenbkd\nsrijafyzloguvxwciqmwhenbkj\nqrijafyzloduvxwctqmphenbko\nsrijefyuloguvxwctqmphenbed\nsrijafyzlobuvxwctqmphenhbd\nsrijafyzloxuvxwctqmpheabkq\nsrijafyzloguvrwctqmghenkkd\nsfisafywloguvxwctqmphenbkd\nsrgjafyzlogurxwctqmphenbkp\nsrijafhzloguvxwcjqmphenhkd\nsrijafyylogufxwrtqmphenbkd\nsrijafyzvoguvxwzkqmphenbkd\nsqijafyzloguvxwctqmpheqbxd\nsrijafyvloguvxwctqzpherbkd\nsrijufyzloguvxlcsqmphenbkd\nsrijafykloguvxlccqmphenbkd\nsrijafyzloguexwcrqmphenzkd\nsridifyzloguyxwctqmphenbkd\nsrijafyzlogfvxwctqlphenbkl\nsrijafyzlodqdxwctqmphenbkd\nsrijafyzloruvxactqmphenekd\ngrijafyzloguvxpctmmphenbkd\nsrsjakyzloguvxwctqmphvnbkd\nsrikafyvloguvxwrtqmphenbkd\nsrijafyzloguvxwctqjpserbkd\njrijafyzloguvxwctqmpgesbkd\nswijafyzluguvxwctqmfhenbkd\nsrijanynlogovxwctqmphenbkd\njrijafyzloguvxwctymphrnbkd\nsrinafyzloguvewctqmphenbzd\nsrijakyzloguvxwctqmphcnbka\nsrijafyhlo
buvxwctqmphenbka\nsrijafyzcogusxwctqmphwnbkd\nsrijavyzlosuvxwctqmphjnbkd\norijafyzxoguvxwcnqmphenbkd\nsrijafyzlogcvxwvtqmthenbkd\nsrijapyzloauvxwctqmphenvkd\nsrijaflzloguhxwctqmphenbwd\nsmijafyzlonuvxwctqmphenbkw\njrijafyzloguvxwclqmnhenbkd\nsrijaqyzloguvqwctqmphenskd\nsrijasyzloguvxwctqmvhenbku\ncrijtfyzloguvxwctqmthenbkd\nsrrkafyzvoguvxwctqmphenbkd\nsrijatyzloguvewctqmphenbld\nsrfjafyyloguvnwctqmphenbkd\nsrijafyzloguvxwctqjpbenbkt\nhrijafyzooguvxwctqmphenbld\nsrijafbzlogscxwctqmphenbkd\nsrinafyzlogxvxwctqqphenbkd\nslijafyzloglvxwctqmphenbdd\nsrijafyzlogjvxwcsqmphenbld\nsryjcfyzloguvewctqmphenbkd\nsrijafyzloguexwctqmohknbkd\njaijafyzlogevxwctqmphenbkd\nsrijafbzlogavxwctqmphenbki\nsrijafozlogpvxwctqmphgnbkd\nsrijdfyzloguvxwczqmphenbkm\nsrijafyzlobuvxwctqmphxndkd\nmrijifyzlhguvxwctqmphenbkd\nsrijafyzloguvxbctumphjnbkd\nsrijafyzloyuvxwptqmphlnbkd\narijafyzloguvxwcsqmohenbkd\nsrijaftzioguvxwttqmphenbkd\nsrijafyzlqsuvxwctqmphxnbkd\nsrijafyzioguvxwctqnphetbkd\nprijafbzloguvxdctqmphenbkd\nsrijaeyzlnguvxwmtqmphenbkd\nsrijofyzloguvqwctqmphonbkd\nsrixaryzpoguvxwctqmphenbkd\nsrijafyzlowuvxwcwhmphenbkd\nsrijafydloguvxwctqmptenikd\nsrijqfyzlogtvfwctqmphenbkd\nsrijafyzloguvxlctqmpvenbgd\nsrijafyzlbguvxwjtqgphenbkd\nsrijafyzlohuqxwctqmphenbka\nsrijafyzroguvxictqmphynbkd\nsrijafyzloguvxdctjmphenjkd\nsrijaoczloguvxwctqmphenbjd\nsrajafhzloguvxwctqmphenbke\nsrijofyzloduvxwctqmphanbkd\nsrijafytloguvxwmtnmphenbkd\nsrijafyzuoguvxwceqmpgenbkd\nrrijafyzloyuvxwctqmphlnbkd\nsrljafyzloguvxictqmohenbkd\nsrijafyzlogulxwcrqrphenbkd\nsrajafyzloguvxwctqmphanbke\nsrijafyzlhguvxwxtqmpheabkd\nsxijafyzloggwxwctqmphenbkd\nsrijafyultguvxwctqmphinbkd\nsrijafyzloguvtwctqmfhvnbkd\nsrijafwzloruvxwctquphenbkd\nsrbjafyzxoguuxwctqmphenbkd\nerijafyzlxguvxbctqmphenbkd\nsrijagyzlojubxwctqmphenbkd\nsrijafyzloguvxwdtqmchenakd\nsrijafkzlogukxwctqiphenbkd\nmridafyzloguvxwctqmphenrkd\nszqjafyzloguvxwctqmpheibkd\nsrijahyzloguvxwctcmphenekd\nsrijafyzloguvxwczpuphenbkd\nsrijafyzcoguvfwctqmphenbkq\nqriiafyzloguvxwctqmphe
ebkd\nsrijpfyzloguvxlctqmphenokd\nsrijzfyzlotuvxwcjqmphenbkd\nsrinafyqloguvxwctfmphenbkd\nsrijafyzlogjvxpltqmphenbkd\nsrijafyzlotuvxwutqmphenbtd\nsridafyzloguvxwctqmpyenokd\nsrxjafyzqogyvxwctqmphenbkd\nssijafyzzoguvxwctqmphenbad\nsrijafrzloguvxwctqmphekpkd\nsrijafyzlfgrvxactqmphenbkd\nsrijafyzroguvxwttqmphekbkd\nsrijefyzloguvxwctqmpqenbrd\nsrijefycloguvxwctqmchenbkd\nsrzjafyzloguvxwcqqmphanbkd\nsrijauyzlhguvxwctqmphenbgd\nsrijafyzloguvmwvnqmphenbkd\nsrihafyzloguvlwotqmphenbkd\nsrigafyzloguvxwctqmphennsd\nsriuafzzloguvxwcuqmphenbkd\nsrijavuzllguvxwctqmphenbkd\nsrijafjzloguvlnctqmphenbkd\nlrirafyzloguvxwctqmphenbld\nsoijarxzloguvxwctqmphenbkd\nsrijapyzlnguvxwctqmdhenbkd\nsrijafyzkogujxmctqmphenbkd\nsrijafuzloguvxwcsqvphenbkd\nsrijagyzzoguvxwctqmpvenbkd\nsrijafyzlovuvxwctqmrhenbxd\nsrijafyzqoguvxwctwmpienbkd\nsxijafyzloguvxwutqmphenlkd\nsrijafyzlhgzvxwctqmphqnbkd\nsrijajyzloguvxwcbwmphenbkd\nsrijazyzloguvxwhtqmphenbkx\nsrgjafyzloguvvwctqmphdnbkd\nrrivafyzloguvxjctqmphenbkd\nsrijifyzdoguvxwctqmphenbka\nhrijafyzloguvxectqmpheybkd\"\"\"\nstartTime = time.time()\ninputList = list(map(str, inputStr.splitlines()))\nnumRepeatsChar = 0\ndoubleDupes = 0\ntripleDupes = 0\nfor string in inputList:\n hasDoubleDupes = False\n hasTripleDupes = False\n for char in string:\n numRepeatsChar = string.count(char)\n if numRepeatsChar == 2 and not hasDoubleDupes:\n doubleDupes += 1\n hasDoubleDupes = True\n elif numRepeatsChar == 3 and not hasTripleDupes:\n tripleDupes += 1\n hasTripleDupes = True\n elif hasDoubleDupes and hasTripleDupes:\n break\n print(doubleDupes)\n print(tripleDupes)\ncheckSum = doubleDupes * tripleDupes\nprint('Checksum: ' + str(checkSum))\nprint('%s seconds' % (time.time() - startTime))\n",
"step-4": "import time\ninputStr = \"\"\"crruafyzloguvxwctqmphenbkd\nsrcjafyzlcguvrwctqmphenbkd\nsrijafyzlogbpxwctgmphenbkd\nzrijafyzloguvxrctqmphendkd\nsrijabyzloguvowcqqmphenbkd\nsrijafyzsoguvxwctbmpienbkd\nsrirtfyzlognvxwctqmphenbkd\nsrijafyzloguvxwctgmphenbmq\nsenjafyzloguvxectqmphenbkd\nsrijafyeloguvxwwtqmphembkd\nsrijafyzlogurxtctqmpkenbkd\nsrijafyzlkguvxictqhphenbkd\nsrijafgzlogunxwctqophenbkd\nshijabyzloguvxwctqmqhenbkd\nsrjoafyzloguvxwctqmphenbwd\nsrijafyhloguvxwmtqmphenkkd\nsrijadyzlogwvxwctqmphenbed\nbrijafyzloguvmwctqmphenhkd\nsmijafyzlhguvxwctqmphjnbkd\nsriqafvzloguvxwctqmpheebkd\nsrijafyzloguvxwisqmpuenbkd\nmrijakyuloguvxwctqmphenbkd\nsrnfafyzloguvxwctqmphgnbkd\nsrijadyzloguvxwhfqmphenbkd\nsrijafhzloguvxwctdmlhenbkd\nsrijafyzloguvxwcsqmphykbkd\nsrijafyzlogwvxwatqmphhnbkd\nsrijafyzlozqvxwctqmphenbku\nsrijafyzloguvxwcbamphenbgd\nsrijafyzlfguvxwctqmphzybkd\nsrijafyzloguqxwetqmphenkkd\nsrijafyylogubxwttqmphenbkd\nsrijafyzloguvxzctadphenbkd\nsrijafyzloguoxwhtqmchenbkd\nsrijafyzloguvxwcvqmzhenbko\nsrijnfyzloguvxwctqmchenjkd\nsrijaryzloggvxwctqzphenbkd\nsrijafhzleguvxwcxqmphenbkd\nssijafyzllguvxfctqmphenbkd\nsrijafyzloguvxdctqmfhenbcd\nsrijafyzloguvxfctqmplynbkd\nsrijaftzlogavxwcrqmphenbkd\nsriwaoyzloguvxwctqmphenbtd\nsrijahyzlogunxwctqmphenbvd\nsrjjafyzloguzxwctumphenbkd\nnrijafyzlxguvxwctqmphanbkd\nsrijafezlqguyxwctqmphenbkd\nsrijafygloguvxwjtqcphenbkd\nerijafyzloguvxoctqmnhenbkd\nssijafyzllguvxwbtqmphenbkd\nsriaafyzloguvxwctqqphenbkv\nfrijafyzloguvswctwmphenbkd\nsrijafyzyogkvxwctqmprenbkd\nsyijafyzuoguvxwctqmkhenbkd\nsrijafyzloganxwctqmphenbkf\nsrijafyzloguvxwftqmxhenbkq\nsrijafyflogxvxwctqmghenbkd\nsrijafyzsoguvxwctqmpjenwkd\nsrujafylloguvxwctqmphenckd\nsrijafyzlpzuvxwctqmphenbud\nsrijafyzlogfvxwctqmhhenbwd\nsrijafjzlogusxwctqmphepbkd\nsrijlfyzloguvxwctqfphenzkd\nsrijafyzlogwvxwctqyphenbqd\nsrijafyzloluvxwctqtphenukd\nsrizafyzlowuvxwctqmphqnbkd\nsritafkzlkguvxwctqmphenbkd\nsbijafdzloguvxgctqmphenbkd\ncrijafyeloguvxwctqmpsenbkd\nsrijafyvlogulxwctqmphenbkk\
nsrijafyologuvxwctqmehegbkd\nsiijafyzloguvxwctjmphenbmd\nsrijafyzlupuvxwctqmpheabkd\nsrijafyzlogumxwctqqphanbkd\nsrijxfyzlogujxwcqqmphenbkd\nirijafizeoguvxwctqmphenbkd\nsgijafyzloguvtwctqmpfenbkd\nsrijzfyzloguvmwctnmphenbkd\nsrijafyzwohuvxwctqmthenbkd\nsrijafyzlhguvxoctqwphenbkd\nsrgjafyplogxvxwctqmphenbkd\nsrijafyqlogovxwctqzphenbkd\nsrijafjzloguvlnvtqmphenbkd\nsrijafyzooguvxwctqmphenvud\nsrijafyzgoguvxwctumphgnbkd\nsrijaffzloguvxwdqqmphenbkd\nsrijafyzlogugxwctqxphenbkr\nsrijafyzlogutxwctqmmcenbkd\nsrifafyzlhguwxwctqmphenbkd\nmrimajyzloguvxwctqmphenbkd\nsriyafyzloguvxwcthmphejbkd\nsrieakyzlokuvxwctqmphenbkd\nsrisafyzloguhxwctqmphecbkd\nsrijanyzloguvxcctqmxhenbkd\nsrijafyzypguvxwctqmqhenbkd\nsryjtfyzlvguvxwctqmphenbkd\nsrijafyzlsguvxwctqmqfenbkd\nsrijafyzlogudxwbtqwphenbkd\nsrijysyzloguvxwctqmpvenbkd\nsrijafyzloggvxwjtqmphegbkd\nsrijgfyzloguvxwctqmbhdnbkd\nssijufyzloguvawctqmphenbkd\nskojafyzloguvxwctqmphenbnd\nsrijafylloguvxwcqqmpienbkd\ntrioafyzloguvqwctqmphenbkd\nsrijafydloguvxwctqmpzjnbkd\nsaijafvzloguvxwcqqmphenbkd\nsrhjapyzloguvxwctqmbhenbkd\nsrijafyzlfguvxwcsqmpwenbkd\nshijafyzboguvxwctqmphenbmd\nsrizafysloguvxwrtqmphenbkd\nsrijafyzloguvxwciqmwhenbkj\nqrijafyzloduvxwctqmphenbko\nsrijefyuloguvxwctqmphenbed\nsrijafyzlobuvxwctqmphenhbd\nsrijafyzloxuvxwctqmpheabkq\nsrijafyzloguvrwctqmghenkkd\nsfisafywloguvxwctqmphenbkd\nsrgjafyzlogurxwctqmphenbkp\nsrijafhzloguvxwcjqmphenhkd\nsrijafyylogufxwrtqmphenbkd\nsrijafyzvoguvxwzkqmphenbkd\nsqijafyzloguvxwctqmpheqbxd\nsrijafyvloguvxwctqzpherbkd\nsrijufyzloguvxlcsqmphenbkd\nsrijafykloguvxlccqmphenbkd\nsrijafyzloguexwcrqmphenzkd\nsridifyzloguyxwctqmphenbkd\nsrijafyzlogfvxwctqlphenbkl\nsrijafyzlodqdxwctqmphenbkd\nsrijafyzloruvxactqmphenekd\ngrijafyzloguvxpctmmphenbkd\nsrsjakyzloguvxwctqmphvnbkd\nsrikafyvloguvxwrtqmphenbkd\nsrijafyzloguvxwctqjpserbkd\njrijafyzloguvxwctqmpgesbkd\nswijafyzluguvxwctqmfhenbkd\nsrijanynlogovxwctqmphenbkd\njrijafyzloguvxwctymphrnbkd\nsrinafyzloguvewctqmphenbzd\nsrijakyzloguvxwctqmphcnbka\nsrijafyhlob
uvxwctqmphenbka\nsrijafyzcogusxwctqmphwnbkd\nsrijavyzlosuvxwctqmphjnbkd\norijafyzxoguvxwcnqmphenbkd\nsrijafyzlogcvxwvtqmthenbkd\nsrijapyzloauvxwctqmphenvkd\nsrijaflzloguhxwctqmphenbwd\nsmijafyzlonuvxwctqmphenbkw\njrijafyzloguvxwclqmnhenbkd\nsrijaqyzloguvqwctqmphenskd\nsrijasyzloguvxwctqmvhenbku\ncrijtfyzloguvxwctqmthenbkd\nsrrkafyzvoguvxwctqmphenbkd\nsrijatyzloguvewctqmphenbld\nsrfjafyyloguvnwctqmphenbkd\nsrijafyzloguvxwctqjpbenbkt\nhrijafyzooguvxwctqmphenbld\nsrijafbzlogscxwctqmphenbkd\nsrinafyzlogxvxwctqqphenbkd\nslijafyzloglvxwctqmphenbdd\nsrijafyzlogjvxwcsqmphenbld\nsryjcfyzloguvewctqmphenbkd\nsrijafyzloguexwctqmohknbkd\njaijafyzlogevxwctqmphenbkd\nsrijafbzlogavxwctqmphenbki\nsrijafozlogpvxwctqmphgnbkd\nsrijdfyzloguvxwczqmphenbkm\nsrijafyzlobuvxwctqmphxndkd\nmrijifyzlhguvxwctqmphenbkd\nsrijafyzloguvxbctumphjnbkd\nsrijafyzloyuvxwptqmphlnbkd\narijafyzloguvxwcsqmohenbkd\nsrijaftzioguvxwttqmphenbkd\nsrijafyzlqsuvxwctqmphxnbkd\nsrijafyzioguvxwctqnphetbkd\nprijafbzloguvxdctqmphenbkd\nsrijaeyzlnguvxwmtqmphenbkd\nsrijofyzloguvqwctqmphonbkd\nsrixaryzpoguvxwctqmphenbkd\nsrijafyzlowuvxwcwhmphenbkd\nsrijafydloguvxwctqmptenikd\nsrijqfyzlogtvfwctqmphenbkd\nsrijafyzloguvxlctqmpvenbgd\nsrijafyzlbguvxwjtqgphenbkd\nsrijafyzlohuqxwctqmphenbka\nsrijafyzroguvxictqmphynbkd\nsrijafyzloguvxdctjmphenjkd\nsrijaoczloguvxwctqmphenbjd\nsrajafhzloguvxwctqmphenbke\nsrijofyzloduvxwctqmphanbkd\nsrijafytloguvxwmtnmphenbkd\nsrijafyzuoguvxwceqmpgenbkd\nrrijafyzloyuvxwctqmphlnbkd\nsrljafyzloguvxictqmohenbkd\nsrijafyzlogulxwcrqrphenbkd\nsrajafyzloguvxwctqmphanbke\nsrijafyzlhguvxwxtqmpheabkd\nsxijafyzloggwxwctqmphenbkd\nsrijafyultguvxwctqmphinbkd\nsrijafyzloguvtwctqmfhvnbkd\nsrijafwzloruvxwctquphenbkd\nsrbjafyzxoguuxwctqmphenbkd\nerijafyzlxguvxbctqmphenbkd\nsrijagyzlojubxwctqmphenbkd\nsrijafyzloguvxwdtqmchenakd\nsrijafkzlogukxwctqiphenbkd\nmridafyzloguvxwctqmphenrkd\nszqjafyzloguvxwctqmpheibkd\nsrijahyzloguvxwctcmphenekd\nsrijafyzloguvxwczpuphenbkd\nsrijafyzcoguvfwctqmphenbkq\nqriiafyzloguvxwctqmphee
bkd\nsrijpfyzloguvxlctqmphenokd\nsrijzfyzlotuvxwcjqmphenbkd\nsrinafyqloguvxwctfmphenbkd\nsrijafyzlogjvxpltqmphenbkd\nsrijafyzlotuvxwutqmphenbtd\nsridafyzloguvxwctqmpyenokd\nsrxjafyzqogyvxwctqmphenbkd\nssijafyzzoguvxwctqmphenbad\nsrijafrzloguvxwctqmphekpkd\nsrijafyzlfgrvxactqmphenbkd\nsrijafyzroguvxwttqmphekbkd\nsrijefyzloguvxwctqmpqenbrd\nsrijefycloguvxwctqmchenbkd\nsrzjafyzloguvxwcqqmphanbkd\nsrijauyzlhguvxwctqmphenbgd\nsrijafyzloguvmwvnqmphenbkd\nsrihafyzloguvlwotqmphenbkd\nsrigafyzloguvxwctqmphennsd\nsriuafzzloguvxwcuqmphenbkd\nsrijavuzllguvxwctqmphenbkd\nsrijafjzloguvlnctqmphenbkd\nlrirafyzloguvxwctqmphenbld\nsoijarxzloguvxwctqmphenbkd\nsrijapyzlnguvxwctqmdhenbkd\nsrijafyzkogujxmctqmphenbkd\nsrijafuzloguvxwcsqvphenbkd\nsrijagyzzoguvxwctqmpvenbkd\nsrijafyzlovuvxwctqmrhenbxd\nsrijafyzqoguvxwctwmpienbkd\nsxijafyzloguvxwutqmphenlkd\nsrijafyzlhgzvxwctqmphqnbkd\nsrijajyzloguvxwcbwmphenbkd\nsrijazyzloguvxwhtqmphenbkx\nsrgjafyzloguvvwctqmphdnbkd\nrrivafyzloguvxjctqmphenbkd\nsrijifyzdoguvxwctqmphenbka\nhrijafyzloguvxectqmpheybkd\"\"\"\nstartTime = time.time()\ninputList = list(map(str, inputStr.splitlines()))\nnumRepeatsChar = 0\ndoubleDupes = 0\ntripleDupes = 0\nfor string in inputList:\n hasDoubleDupes = False\n hasTripleDupes = False\n for char in string:\n numRepeatsChar = string.count(char)\n if numRepeatsChar == 2 and not hasDoubleDupes:\n doubleDupes += 1\n hasDoubleDupes = True\n elif numRepeatsChar == 3 and not hasTripleDupes:\n tripleDupes += 1\n hasTripleDupes = True\n elif hasDoubleDupes and hasTripleDupes:\n break\n print(doubleDupes)\n print(tripleDupes)\ncheckSum = doubleDupes * tripleDupes\nprint('Checksum: ' + str(checkSum))\nprint('%s seconds' % (time.time() - startTime))\n",
"step-5": "import time\n\ninputStr = \"\"\"crruafyzloguvxwctqmphenbkd\nsrcjafyzlcguvrwctqmphenbkd\nsrijafyzlogbpxwctgmphenbkd\nzrijafyzloguvxrctqmphendkd\nsrijabyzloguvowcqqmphenbkd\nsrijafyzsoguvxwctbmpienbkd\nsrirtfyzlognvxwctqmphenbkd\nsrijafyzloguvxwctgmphenbmq\nsenjafyzloguvxectqmphenbkd\nsrijafyeloguvxwwtqmphembkd\nsrijafyzlogurxtctqmpkenbkd\nsrijafyzlkguvxictqhphenbkd\nsrijafgzlogunxwctqophenbkd\nshijabyzloguvxwctqmqhenbkd\nsrjoafyzloguvxwctqmphenbwd\nsrijafyhloguvxwmtqmphenkkd\nsrijadyzlogwvxwctqmphenbed\nbrijafyzloguvmwctqmphenhkd\nsmijafyzlhguvxwctqmphjnbkd\nsriqafvzloguvxwctqmpheebkd\nsrijafyzloguvxwisqmpuenbkd\nmrijakyuloguvxwctqmphenbkd\nsrnfafyzloguvxwctqmphgnbkd\nsrijadyzloguvxwhfqmphenbkd\nsrijafhzloguvxwctdmlhenbkd\nsrijafyzloguvxwcsqmphykbkd\nsrijafyzlogwvxwatqmphhnbkd\nsrijafyzlozqvxwctqmphenbku\nsrijafyzloguvxwcbamphenbgd\nsrijafyzlfguvxwctqmphzybkd\nsrijafyzloguqxwetqmphenkkd\nsrijafyylogubxwttqmphenbkd\nsrijafyzloguvxzctadphenbkd\nsrijafyzloguoxwhtqmchenbkd\nsrijafyzloguvxwcvqmzhenbko\nsrijnfyzloguvxwctqmchenjkd\nsrijaryzloggvxwctqzphenbkd\nsrijafhzleguvxwcxqmphenbkd\nssijafyzllguvxfctqmphenbkd\nsrijafyzloguvxdctqmfhenbcd\nsrijafyzloguvxfctqmplynbkd\nsrijaftzlogavxwcrqmphenbkd\nsriwaoyzloguvxwctqmphenbtd\nsrijahyzlogunxwctqmphenbvd\nsrjjafyzloguzxwctumphenbkd\nnrijafyzlxguvxwctqmphanbkd\nsrijafezlqguyxwctqmphenbkd\nsrijafygloguvxwjtqcphenbkd\nerijafyzloguvxoctqmnhenbkd\nssijafyzllguvxwbtqmphenbkd\nsriaafyzloguvxwctqqphenbkv\nfrijafyzloguvswctwmphenbkd\nsrijafyzyogkvxwctqmprenbkd\nsyijafyzuoguvxwctqmkhenbkd\nsrijafyzloganxwctqmphenbkf\nsrijafyzloguvxwftqmxhenbkq\nsrijafyflogxvxwctqmghenbkd\nsrijafyzsoguvxwctqmpjenwkd\nsrujafylloguvxwctqmphenckd\nsrijafyzlpzuvxwctqmphenbud\nsrijafyzlogfvxwctqmhhenbwd\nsrijafjzlogusxwctqmphepbkd\nsrijlfyzloguvxwctqfphenzkd\nsrijafyzlogwvxwctqyphenbqd\nsrijafyzloluvxwctqtphenukd\nsrizafyzlowuvxwctqmphqnbkd\nsritafkzlkguvxwctqmphenbkd\nsbijafdzloguvxgctqmphenbkd\ncrijafyeloguvxwctqmpsenbkd\nsrijafyvlogulxwctqmphenbk
k\nsrijafyologuvxwctqmehegbkd\nsiijafyzloguvxwctjmphenbmd\nsrijafyzlupuvxwctqmpheabkd\nsrijafyzlogumxwctqqphanbkd\nsrijxfyzlogujxwcqqmphenbkd\nirijafizeoguvxwctqmphenbkd\nsgijafyzloguvtwctqmpfenbkd\nsrijzfyzloguvmwctnmphenbkd\nsrijafyzwohuvxwctqmthenbkd\nsrijafyzlhguvxoctqwphenbkd\nsrgjafyplogxvxwctqmphenbkd\nsrijafyqlogovxwctqzphenbkd\nsrijafjzloguvlnvtqmphenbkd\nsrijafyzooguvxwctqmphenvud\nsrijafyzgoguvxwctumphgnbkd\nsrijaffzloguvxwdqqmphenbkd\nsrijafyzlogugxwctqxphenbkr\nsrijafyzlogutxwctqmmcenbkd\nsrifafyzlhguwxwctqmphenbkd\nmrimajyzloguvxwctqmphenbkd\nsriyafyzloguvxwcthmphejbkd\nsrieakyzlokuvxwctqmphenbkd\nsrisafyzloguhxwctqmphecbkd\nsrijanyzloguvxcctqmxhenbkd\nsrijafyzypguvxwctqmqhenbkd\nsryjtfyzlvguvxwctqmphenbkd\nsrijafyzlsguvxwctqmqfenbkd\nsrijafyzlogudxwbtqwphenbkd\nsrijysyzloguvxwctqmpvenbkd\nsrijafyzloggvxwjtqmphegbkd\nsrijgfyzloguvxwctqmbhdnbkd\nssijufyzloguvawctqmphenbkd\nskojafyzloguvxwctqmphenbnd\nsrijafylloguvxwcqqmpienbkd\ntrioafyzloguvqwctqmphenbkd\nsrijafydloguvxwctqmpzjnbkd\nsaijafvzloguvxwcqqmphenbkd\nsrhjapyzloguvxwctqmbhenbkd\nsrijafyzlfguvxwcsqmpwenbkd\nshijafyzboguvxwctqmphenbmd\nsrizafysloguvxwrtqmphenbkd\nsrijafyzloguvxwciqmwhenbkj\nqrijafyzloduvxwctqmphenbko\nsrijefyuloguvxwctqmphenbed\nsrijafyzlobuvxwctqmphenhbd\nsrijafyzloxuvxwctqmpheabkq\nsrijafyzloguvrwctqmghenkkd\nsfisafywloguvxwctqmphenbkd\nsrgjafyzlogurxwctqmphenbkp\nsrijafhzloguvxwcjqmphenhkd\nsrijafyylogufxwrtqmphenbkd\nsrijafyzvoguvxwzkqmphenbkd\nsqijafyzloguvxwctqmpheqbxd\nsrijafyvloguvxwctqzpherbkd\nsrijufyzloguvxlcsqmphenbkd\nsrijafykloguvxlccqmphenbkd\nsrijafyzloguexwcrqmphenzkd\nsridifyzloguyxwctqmphenbkd\nsrijafyzlogfvxwctqlphenbkl\nsrijafyzlodqdxwctqmphenbkd\nsrijafyzloruvxactqmphenekd\ngrijafyzloguvxpctmmphenbkd\nsrsjakyzloguvxwctqmphvnbkd\nsrikafyvloguvxwrtqmphenbkd\nsrijafyzloguvxwctqjpserbkd\njrijafyzloguvxwctqmpgesbkd\nswijafyzluguvxwctqmfhenbkd\nsrijanynlogovxwctqmphenbkd\njrijafyzloguvxwctymphrnbkd\nsrinafyzloguvewctqmphenbzd\nsrijakyzloguvxwctqmphcnbka\nsrijafyhl
obuvxwctqmphenbka\nsrijafyzcogusxwctqmphwnbkd\nsrijavyzlosuvxwctqmphjnbkd\norijafyzxoguvxwcnqmphenbkd\nsrijafyzlogcvxwvtqmthenbkd\nsrijapyzloauvxwctqmphenvkd\nsrijaflzloguhxwctqmphenbwd\nsmijafyzlonuvxwctqmphenbkw\njrijafyzloguvxwclqmnhenbkd\nsrijaqyzloguvqwctqmphenskd\nsrijasyzloguvxwctqmvhenbku\ncrijtfyzloguvxwctqmthenbkd\nsrrkafyzvoguvxwctqmphenbkd\nsrijatyzloguvewctqmphenbld\nsrfjafyyloguvnwctqmphenbkd\nsrijafyzloguvxwctqjpbenbkt\nhrijafyzooguvxwctqmphenbld\nsrijafbzlogscxwctqmphenbkd\nsrinafyzlogxvxwctqqphenbkd\nslijafyzloglvxwctqmphenbdd\nsrijafyzlogjvxwcsqmphenbld\nsryjcfyzloguvewctqmphenbkd\nsrijafyzloguexwctqmohknbkd\njaijafyzlogevxwctqmphenbkd\nsrijafbzlogavxwctqmphenbki\nsrijafozlogpvxwctqmphgnbkd\nsrijdfyzloguvxwczqmphenbkm\nsrijafyzlobuvxwctqmphxndkd\nmrijifyzlhguvxwctqmphenbkd\nsrijafyzloguvxbctumphjnbkd\nsrijafyzloyuvxwptqmphlnbkd\narijafyzloguvxwcsqmohenbkd\nsrijaftzioguvxwttqmphenbkd\nsrijafyzlqsuvxwctqmphxnbkd\nsrijafyzioguvxwctqnphetbkd\nprijafbzloguvxdctqmphenbkd\nsrijaeyzlnguvxwmtqmphenbkd\nsrijofyzloguvqwctqmphonbkd\nsrixaryzpoguvxwctqmphenbkd\nsrijafyzlowuvxwcwhmphenbkd\nsrijafydloguvxwctqmptenikd\nsrijqfyzlogtvfwctqmphenbkd\nsrijafyzloguvxlctqmpvenbgd\nsrijafyzlbguvxwjtqgphenbkd\nsrijafyzlohuqxwctqmphenbka\nsrijafyzroguvxictqmphynbkd\nsrijafyzloguvxdctjmphenjkd\nsrijaoczloguvxwctqmphenbjd\nsrajafhzloguvxwctqmphenbke\nsrijofyzloduvxwctqmphanbkd\nsrijafytloguvxwmtnmphenbkd\nsrijafyzuoguvxwceqmpgenbkd\nrrijafyzloyuvxwctqmphlnbkd\nsrljafyzloguvxictqmohenbkd\nsrijafyzlogulxwcrqrphenbkd\nsrajafyzloguvxwctqmphanbke\nsrijafyzlhguvxwxtqmpheabkd\nsxijafyzloggwxwctqmphenbkd\nsrijafyultguvxwctqmphinbkd\nsrijafyzloguvtwctqmfhvnbkd\nsrijafwzloruvxwctquphenbkd\nsrbjafyzxoguuxwctqmphenbkd\nerijafyzlxguvxbctqmphenbkd\nsrijagyzlojubxwctqmphenbkd\nsrijafyzloguvxwdtqmchenakd\nsrijafkzlogukxwctqiphenbkd\nmridafyzloguvxwctqmphenrkd\nszqjafyzloguvxwctqmpheibkd\nsrijahyzloguvxwctcmphenekd\nsrijafyzloguvxwczpuphenbkd\nsrijafyzcoguvfwctqmphenbkq\nqriiafyzloguvxwctqmph
eebkd\nsrijpfyzloguvxlctqmphenokd\nsrijzfyzlotuvxwcjqmphenbkd\nsrinafyqloguvxwctfmphenbkd\nsrijafyzlogjvxpltqmphenbkd\nsrijafyzlotuvxwutqmphenbtd\nsridafyzloguvxwctqmpyenokd\nsrxjafyzqogyvxwctqmphenbkd\nssijafyzzoguvxwctqmphenbad\nsrijafrzloguvxwctqmphekpkd\nsrijafyzlfgrvxactqmphenbkd\nsrijafyzroguvxwttqmphekbkd\nsrijefyzloguvxwctqmpqenbrd\nsrijefycloguvxwctqmchenbkd\nsrzjafyzloguvxwcqqmphanbkd\nsrijauyzlhguvxwctqmphenbgd\nsrijafyzloguvmwvnqmphenbkd\nsrihafyzloguvlwotqmphenbkd\nsrigafyzloguvxwctqmphennsd\nsriuafzzloguvxwcuqmphenbkd\nsrijavuzllguvxwctqmphenbkd\nsrijafjzloguvlnctqmphenbkd\nlrirafyzloguvxwctqmphenbld\nsoijarxzloguvxwctqmphenbkd\nsrijapyzlnguvxwctqmdhenbkd\nsrijafyzkogujxmctqmphenbkd\nsrijafuzloguvxwcsqvphenbkd\nsrijagyzzoguvxwctqmpvenbkd\nsrijafyzlovuvxwctqmrhenbxd\nsrijafyzqoguvxwctwmpienbkd\nsxijafyzloguvxwutqmphenlkd\nsrijafyzlhgzvxwctqmphqnbkd\nsrijajyzloguvxwcbwmphenbkd\nsrijazyzloguvxwhtqmphenbkx\nsrgjafyzloguvvwctqmphdnbkd\nrrivafyzloguvxjctqmphenbkd\nsrijifyzdoguvxwctqmphenbka\nhrijafyzloguvxectqmpheybkd\"\"\"\n\nstartTime = time.time()\ninputList = list(map(str, inputStr.splitlines()))\n\nnumRepeatsChar = 0\ndoubleDupes = 0\ntripleDupes = 0\n\nfor string in inputList:\n hasDoubleDupes = False\n hasTripleDupes = False\n for char in string:\n numRepeatsChar = string.count(char)\n if numRepeatsChar == 2 and not hasDoubleDupes:\n doubleDupes += 1\n hasDoubleDupes = True\n \n elif numRepeatsChar == 3 and not hasTripleDupes:\n tripleDupes += 1\n hasTripleDupes = True\n \n elif hasDoubleDupes and hasTripleDupes:\n break\n\n print(doubleDupes)\n print(tripleDupes)\n\ncheckSum = doubleDupes * tripleDupes\nprint('Checksum: ' + str(checkSum))\n\nprint(\"%s seconds\" % (time.time() - startTime))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def fibonacci(quantidade):
resultado = [1, 2]
# while True:
# substituir o while pelo for, em um range do 2° valor da lista, correr até
# o valor definido na função "Quantidade"
for _ in range(2, quantidade):
# desta forma ele irá realizar a função do 2° da lista até atingir
# o valor de quantiade.
# utiziamos o _ no for, para dizer que é uma função não utilizad
resultado.append(sum(resultado[-2:]))
return resultado
for fib in fibonacci(20):
print(fib)
|
normal
|
{
"blob_id": "83c7bb2e109f8affd9e2a12e8c5370b0f5a34048",
"index": 653,
"step-1": "<mask token>\n",
"step-2": "def fibonacci(quantidade):\n resultado = [1, 2]\n for _ in range(2, quantidade):\n resultado.append(sum(resultado[-2:]))\n return resultado\n\n\n<mask token>\n",
"step-3": "def fibonacci(quantidade):\n resultado = [1, 2]\n for _ in range(2, quantidade):\n resultado.append(sum(resultado[-2:]))\n return resultado\n\n\nfor fib in fibonacci(20):\n print(fib)\n",
"step-4": "def fibonacci(quantidade):\n resultado = [1, 2]\n# while True:\n# substituir o while pelo for, em um range do 2° valor da lista, correr até\n# o valor definido na função \"Quantidade\"\n for _ in range(2, quantidade):\n # desta forma ele irá realizar a função do 2° da lista até atingir\n # o valor de quantiade.\n # utiziamos o _ no for, para dizer que é uma função não utilizad\n resultado.append(sum(resultado[-2:]))\n return resultado\n\n\nfor fib in fibonacci(20):\n print(fib)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User_Game(CPU_Game):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User_Game(CPU_Game):
def get_user_phrase(self):
correct_form = False
while not correct_form:
correct_form = True
self.phrase = input('Please input a phrase: ').upper()
for i in range(0, len(self.phrase)):
alpha_space = self.phrase[i].isalpha() or self.phrase[i
].isspace()
if not alpha_space:
correct_form = False
print(Warning.YELLOW +
'\nPhrase needs to be all letters!!!\n' + Warning.END)
break
if self.phrase == '':
correct_form = False
print(Warning.YELLOW + '\nDid you mean to input nothing?',
""" Do you want to play or not?!?!
""" + Warning.END)
<|reserved_special_token_1|>
from cpu_game import CPU_Game
from warning_color import Warning
class User_Game(CPU_Game):
def get_user_phrase(self):
correct_form = False
while not correct_form:
correct_form = True
self.phrase = input('Please input a phrase: ').upper()
for i in range(0, len(self.phrase)):
alpha_space = self.phrase[i].isalpha() or self.phrase[i
].isspace()
if not alpha_space:
correct_form = False
print(Warning.YELLOW +
'\nPhrase needs to be all letters!!!\n' + Warning.END)
break
if self.phrase == '':
correct_form = False
print(Warning.YELLOW + '\nDid you mean to input nothing?',
""" Do you want to play or not?!?!
""" + Warning.END)
<|reserved_special_token_1|>
#Program written and maintained by Matthew Meyerink
#File responsible for defining the game based on user input
from cpu_game import CPU_Game
from warning_color import Warning
class User_Game(CPU_Game):
#Get the user phrase to start the game
def get_user_phrase(self):
correct_form = False
while (not correct_form):
correct_form = True
#Recieve the input phrase
self.phrase = input("Please input a phrase: ").upper()
#Check to make sure no numbers or special characters in phrase
for i in range(0, len(self.phrase)):
alpha_space = (self.phrase[i].isalpha()
or self.phrase[i].isspace())
if not alpha_space:
correct_form = False
print(Warning.YELLOW +
"\nPhrase needs to be all letters!!!\n" +
Warning.END)
break
#Check to make sure phrase isn't empty
if self.phrase == "":
correct_form = False
print(Warning.YELLOW +
"\nDid you mean to input nothing?",
" Do you want to play or not?!?!\n" +
Warning.END)
|
flexible
|
{
"blob_id": "d0dbf5a13b8e718ed426a254546ba13da12b2c3e",
"index": 4149,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass User_Game(CPU_Game):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass User_Game(CPU_Game):\n\n def get_user_phrase(self):\n correct_form = False\n while not correct_form:\n correct_form = True\n self.phrase = input('Please input a phrase: ').upper()\n for i in range(0, len(self.phrase)):\n alpha_space = self.phrase[i].isalpha() or self.phrase[i\n ].isspace()\n if not alpha_space:\n correct_form = False\n print(Warning.YELLOW +\n '\\nPhrase needs to be all letters!!!\\n' + Warning.END)\n break\n if self.phrase == '':\n correct_form = False\n print(Warning.YELLOW + '\\nDid you mean to input nothing?', \n \"\"\" Do you want to play or not?!?!\n\"\"\" + Warning.END)\n",
"step-4": "from cpu_game import CPU_Game\nfrom warning_color import Warning\n\n\nclass User_Game(CPU_Game):\n\n def get_user_phrase(self):\n correct_form = False\n while not correct_form:\n correct_form = True\n self.phrase = input('Please input a phrase: ').upper()\n for i in range(0, len(self.phrase)):\n alpha_space = self.phrase[i].isalpha() or self.phrase[i\n ].isspace()\n if not alpha_space:\n correct_form = False\n print(Warning.YELLOW +\n '\\nPhrase needs to be all letters!!!\\n' + Warning.END)\n break\n if self.phrase == '':\n correct_form = False\n print(Warning.YELLOW + '\\nDid you mean to input nothing?', \n \"\"\" Do you want to play or not?!?!\n\"\"\" + Warning.END)\n",
"step-5": "\n#Program written and maintained by Matthew Meyerink\n\n#File responsible for defining the game based on user input\n\nfrom cpu_game import CPU_Game\nfrom warning_color import Warning\n\nclass User_Game(CPU_Game):\n\n #Get the user phrase to start the game\n def get_user_phrase(self):\n correct_form = False\n while (not correct_form):\n\n correct_form = True\n\n #Recieve the input phrase\n self.phrase = input(\"Please input a phrase: \").upper()\n\n #Check to make sure no numbers or special characters in phrase\n for i in range(0, len(self.phrase)):\n alpha_space = (self.phrase[i].isalpha()\n or self.phrase[i].isspace())\n if not alpha_space:\n correct_form = False\n print(Warning.YELLOW +\n \"\\nPhrase needs to be all letters!!!\\n\" +\n Warning.END)\n break\n\n #Check to make sure phrase isn't empty\n if self.phrase == \"\":\n correct_form = False\n print(Warning.YELLOW +\n \"\\nDid you mean to input nothing?\",\n \" Do you want to play or not?!?!\\n\" +\n Warning.END)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@PublicAPI
class ParameterNoise(Exploration):
<|reserved_special_token_0|>
def __init__(self, action_space, *, framework: str, policy_config: dict,
model: ModelV2, initial_stddev: float=1.0, random_timesteps: int=
10000, sub_exploration: Optional[dict]=None, **kwargs):
"""Initializes a ParameterNoise Exploration object.
Args:
initial_stddev: The initial stddev to use for the noise.
random_timesteps: The number of timesteps to act completely
randomly (see [1]).
sub_exploration: Optional sub-exploration config.
None for auto-detection/setup.
"""
assert framework is not None
super().__init__(action_space, policy_config=policy_config, model=
model, framework=framework, **kwargs)
self.stddev = get_variable(initial_stddev, framework=self.framework,
tf_name='stddev')
self.stddev_val = initial_stddev
self.model_variables = [v for k, v in self.model.
trainable_variables(as_dict=True).items() if 'LayerNorm' not in k]
self.noise = []
for var in self.model_variables:
name_ = var.name.split(':')[0] + '_noisy' if var.name else ''
self.noise.append(get_variable(np.zeros(var.shape, dtype=np.
float32), framework=self.framework, tf_name=name_,
torch_tensor=True, device=self.device))
if self.framework == 'tf' and not tf.executing_eagerly():
self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()
self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()
self.tf_remove_noise_op = self._tf_remove_noise_op()
with tf1.control_dependencies([self.tf_sample_new_noise_op]):
add_op = self._tf_add_stored_noise_op()
with tf1.control_dependencies([add_op]):
self.tf_sample_new_noise_and_add_op = tf.no_op()
self.weights_are_currently_noisy = False
if sub_exploration is None:
if isinstance(self.action_space, Discrete):
sub_exploration = {'type': 'EpsilonGreedy',
'epsilon_schedule': {'type': 'PiecewiseSchedule',
'endpoints': [(0, 1.0), (random_timesteps + 1, 1.0), (
random_timesteps + 2, 0.01)], 'outside_value': 0.01}}
elif isinstance(self.action_space, Box):
sub_exploration = {'type': 'OrnsteinUhlenbeckNoise',
'random_timesteps': random_timesteps}
else:
raise NotImplementedError
self.sub_exploration = from_config(Exploration, sub_exploration,
framework=self.framework, action_space=self.action_space,
policy_config=self.policy_config, model=self.model, **kwargs)
self.episode_started = False
@override(Exploration)
def before_compute_actions(self, *, timestep: Optional[int]=None,
explore: Optional[bool]=None, tf_sess: Optional['tf.Session']=None):
explore = explore if explore is not None else self.policy_config[
'explore']
if self.episode_started:
self._delayed_on_episode_start(explore, tf_sess)
if explore and not self.weights_are_currently_noisy:
self._add_stored_noise(tf_sess=tf_sess)
elif not explore and self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def get_exploration_action(self, *, action_distribution:
ActionDistribution, timestep: Union[TensorType, int], explore:
Union[TensorType, bool]):
return self.sub_exploration.get_exploration_action(action_distribution
=action_distribution, timestep=timestep, explore=explore)
@override(Exploration)
def on_episode_start(self, policy: 'Policy', *, environment: BaseEnv=
None, episode: int=None, tf_sess: Optional['tf.Session']=None):
self.episode_started = True
def _delayed_on_episode_start(self, explore, tf_sess):
if explore:
self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
else:
self._sample_new_noise(tf_sess=tf_sess)
self.episode_started = False
@override(Exploration)
def on_episode_end(self, policy, *, environment=None, episode=None,
tf_sess=None):
if self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def postprocess_trajectory(self, policy: 'Policy', sample_batch:
SampleBatch, tf_sess: Optional['tf.Session']=None):
noisy_action_dist = noise_free_action_dist = None
_, _, fetches = policy.compute_actions_from_input_dict(input_dict=
sample_batch, explore=self.weights_are_currently_noisy)
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)
):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
else:
raise NotImplementedError
if self.weights_are_currently_noisy:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
_, _, fetches = policy.compute_actions_from_input_dict(input_dict=
sample_batch, explore=not self.weights_are_currently_noisy)
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)
):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
if noisy_action_dist is None:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
delta = distance = None
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
distance = np.nanmean(np.sum(noise_free_action_dist * np.log(
noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
), 1))
current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
'cur_epsilon']
delta = -np.log(1 - current_epsilon + current_epsilon / self.
action_space.n)
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)
):
distance = np.sqrt(np.mean(np.square(noise_free_action_dist -
noisy_action_dist)))
current_scale = self.sub_exploration.get_state(sess=tf_sess)[
'cur_scale']
delta = getattr(self.sub_exploration, 'ou_sigma', 0.2
) * current_scale
if distance <= delta:
self.stddev_val *= 1.01
else:
self.stddev_val /= 1.01
self.set_state(self.get_state(), sess=tf_sess)
return sample_batch
def _sample_new_noise(self, *, tf_sess=None):
"""Samples new noise and stores it in `self.noise`."""
if self.framework == 'tf':
tf_sess.run(self.tf_sample_new_noise_op)
elif self.framework == 'tf2':
self._tf_sample_new_noise_op()
else:
for i in range(len(self.noise)):
self.noise[i] = torch.normal(mean=torch.zeros(self.noise[i]
.size()), std=self.stddev).to(self.device)
def _tf_sample_new_noise_op(self):
added_noises = []
for noise in self.noise:
added_noises.append(tf1.assign(noise, tf.random.normal(shape=
noise.shape, stddev=self.stddev, dtype=tf.float32)))
return tf.group(*added_noises)
def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
if self.framework == 'tf':
if override and self.weights_are_currently_noisy:
tf_sess.run(self.tf_remove_noise_op)
tf_sess.run(self.tf_sample_new_noise_and_add_op)
else:
if override and self.weights_are_currently_noisy:
self._remove_noise()
self._sample_new_noise()
self._add_stored_noise()
self.weights_are_currently_noisy = True
def _add_stored_noise(self, *, tf_sess=None):
"""Adds the stored `self.noise` to the model's parameters.
Note: No new sampling of noise here.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to add the
stored noise to the (currently noise-free) weights.
override: If True, undo any currently applied noise first,
then add the currently stored noise.
"""
assert self.weights_are_currently_noisy is False
if self.framework == 'tf':
tf_sess.run(self.tf_add_stored_noise_op)
elif self.framework == 'tf2':
self._tf_add_stored_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
var.requires_grad = False
var.add_(noise)
var.requires_grad = True
self.weights_are_currently_noisy = True
def _tf_add_stored_noise_op(self):
"""Generates tf-op that assigns the stored noise to weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to apply the already stored noise to the NN.
"""
add_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
add_noise_ops.append(tf1.assign_add(var, noise))
ret = tf.group(*tuple(add_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
def _remove_noise(self, *, tf_sess=None):
"""
Removes the current action noise from the model parameters.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to remove
the noise from the (currently noisy) weights.
"""
assert self.weights_are_currently_noisy is True
if self.framework == 'tf':
tf_sess.run(self.tf_remove_noise_op)
elif self.framework == 'tf2':
self._tf_remove_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
var.requires_grad = False
var.add_(-noise)
var.requires_grad = True
self.weights_are_currently_noisy = False
def _tf_remove_noise_op(self):
"""Generates a tf-op for removing noise from the model's weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to remve the currently stored noise from the NN.
"""
remove_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
remove_noise_ops.append(tf1.assign_add(var, -noise))
ret = tf.group(*tuple(remove_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
@override(Exploration)
def get_state(self, sess=None):
return {'cur_stddev': self.stddev_val}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@PublicAPI
class ParameterNoise(Exploration):
<|reserved_special_token_0|>
def __init__(self, action_space, *, framework: str, policy_config: dict,
model: ModelV2, initial_stddev: float=1.0, random_timesteps: int=
10000, sub_exploration: Optional[dict]=None, **kwargs):
"""Initializes a ParameterNoise Exploration object.
Args:
initial_stddev: The initial stddev to use for the noise.
random_timesteps: The number of timesteps to act completely
randomly (see [1]).
sub_exploration: Optional sub-exploration config.
None for auto-detection/setup.
"""
assert framework is not None
super().__init__(action_space, policy_config=policy_config, model=
model, framework=framework, **kwargs)
self.stddev = get_variable(initial_stddev, framework=self.framework,
tf_name='stddev')
self.stddev_val = initial_stddev
self.model_variables = [v for k, v in self.model.
trainable_variables(as_dict=True).items() if 'LayerNorm' not in k]
self.noise = []
for var in self.model_variables:
name_ = var.name.split(':')[0] + '_noisy' if var.name else ''
self.noise.append(get_variable(np.zeros(var.shape, dtype=np.
float32), framework=self.framework, tf_name=name_,
torch_tensor=True, device=self.device))
if self.framework == 'tf' and not tf.executing_eagerly():
self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()
self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()
self.tf_remove_noise_op = self._tf_remove_noise_op()
with tf1.control_dependencies([self.tf_sample_new_noise_op]):
add_op = self._tf_add_stored_noise_op()
with tf1.control_dependencies([add_op]):
self.tf_sample_new_noise_and_add_op = tf.no_op()
self.weights_are_currently_noisy = False
if sub_exploration is None:
if isinstance(self.action_space, Discrete):
sub_exploration = {'type': 'EpsilonGreedy',
'epsilon_schedule': {'type': 'PiecewiseSchedule',
'endpoints': [(0, 1.0), (random_timesteps + 1, 1.0), (
random_timesteps + 2, 0.01)], 'outside_value': 0.01}}
elif isinstance(self.action_space, Box):
sub_exploration = {'type': 'OrnsteinUhlenbeckNoise',
'random_timesteps': random_timesteps}
else:
raise NotImplementedError
self.sub_exploration = from_config(Exploration, sub_exploration,
framework=self.framework, action_space=self.action_space,
policy_config=self.policy_config, model=self.model, **kwargs)
self.episode_started = False
@override(Exploration)
def before_compute_actions(self, *, timestep: Optional[int]=None,
explore: Optional[bool]=None, tf_sess: Optional['tf.Session']=None):
explore = explore if explore is not None else self.policy_config[
'explore']
if self.episode_started:
self._delayed_on_episode_start(explore, tf_sess)
if explore and not self.weights_are_currently_noisy:
self._add_stored_noise(tf_sess=tf_sess)
elif not explore and self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def get_exploration_action(self, *, action_distribution:
ActionDistribution, timestep: Union[TensorType, int], explore:
Union[TensorType, bool]):
return self.sub_exploration.get_exploration_action(action_distribution
=action_distribution, timestep=timestep, explore=explore)
@override(Exploration)
def on_episode_start(self, policy: 'Policy', *, environment: BaseEnv=
None, episode: int=None, tf_sess: Optional['tf.Session']=None):
self.episode_started = True
def _delayed_on_episode_start(self, explore, tf_sess):
if explore:
self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
else:
self._sample_new_noise(tf_sess=tf_sess)
self.episode_started = False
@override(Exploration)
def on_episode_end(self, policy, *, environment=None, episode=None,
tf_sess=None):
if self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def postprocess_trajectory(self, policy: 'Policy', sample_batch:
SampleBatch, tf_sess: Optional['tf.Session']=None):
noisy_action_dist = noise_free_action_dist = None
_, _, fetches = policy.compute_actions_from_input_dict(input_dict=
sample_batch, explore=self.weights_are_currently_noisy)
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)
):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
else:
raise NotImplementedError
if self.weights_are_currently_noisy:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
_, _, fetches = policy.compute_actions_from_input_dict(input_dict=
sample_batch, explore=not self.weights_are_currently_noisy)
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)
):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
if noisy_action_dist is None:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
delta = distance = None
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
distance = np.nanmean(np.sum(noise_free_action_dist * np.log(
noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
), 1))
current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
'cur_epsilon']
delta = -np.log(1 - current_epsilon + current_epsilon / self.
action_space.n)
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)
):
distance = np.sqrt(np.mean(np.square(noise_free_action_dist -
noisy_action_dist)))
current_scale = self.sub_exploration.get_state(sess=tf_sess)[
'cur_scale']
delta = getattr(self.sub_exploration, 'ou_sigma', 0.2
) * current_scale
if distance <= delta:
self.stddev_val *= 1.01
else:
self.stddev_val /= 1.01
self.set_state(self.get_state(), sess=tf_sess)
return sample_batch
def _sample_new_noise(self, *, tf_sess=None):
"""Samples new noise and stores it in `self.noise`."""
if self.framework == 'tf':
tf_sess.run(self.tf_sample_new_noise_op)
elif self.framework == 'tf2':
self._tf_sample_new_noise_op()
else:
for i in range(len(self.noise)):
self.noise[i] = torch.normal(mean=torch.zeros(self.noise[i]
.size()), std=self.stddev).to(self.device)
def _tf_sample_new_noise_op(self):
added_noises = []
for noise in self.noise:
added_noises.append(tf1.assign(noise, tf.random.normal(shape=
noise.shape, stddev=self.stddev, dtype=tf.float32)))
return tf.group(*added_noises)
def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
if self.framework == 'tf':
if override and self.weights_are_currently_noisy:
tf_sess.run(self.tf_remove_noise_op)
tf_sess.run(self.tf_sample_new_noise_and_add_op)
else:
if override and self.weights_are_currently_noisy:
self._remove_noise()
self._sample_new_noise()
self._add_stored_noise()
self.weights_are_currently_noisy = True
def _add_stored_noise(self, *, tf_sess=None):
"""Adds the stored `self.noise` to the model's parameters.
Note: No new sampling of noise here.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to add the
stored noise to the (currently noise-free) weights.
override: If True, undo any currently applied noise first,
then add the currently stored noise.
"""
assert self.weights_are_currently_noisy is False
if self.framework == 'tf':
tf_sess.run(self.tf_add_stored_noise_op)
elif self.framework == 'tf2':
self._tf_add_stored_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
var.requires_grad = False
var.add_(noise)
var.requires_grad = True
self.weights_are_currently_noisy = True
def _tf_add_stored_noise_op(self):
"""Generates tf-op that assigns the stored noise to weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to apply the already stored noise to the NN.
"""
add_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
add_noise_ops.append(tf1.assign_add(var, noise))
ret = tf.group(*tuple(add_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
def _remove_noise(self, *, tf_sess=None):
"""
Removes the current action noise from the model parameters.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to remove
the noise from the (currently noisy) weights.
"""
assert self.weights_are_currently_noisy is True
if self.framework == 'tf':
tf_sess.run(self.tf_remove_noise_op)
elif self.framework == 'tf2':
self._tf_remove_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
var.requires_grad = False
var.add_(-noise)
var.requires_grad = True
self.weights_are_currently_noisy = False
def _tf_remove_noise_op(self):
"""Generates a tf-op for removing noise from the model's weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to remve the currently stored noise from the NN.
"""
remove_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
remove_noise_ops.append(tf1.assign_add(var, -noise))
ret = tf.group(*tuple(remove_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
@override(Exploration)
def get_state(self, sess=None):
return {'cur_stddev': self.stddev_val}
@override(Exploration)
def set_state(self, state: dict, sess: Optional['tf.Session']=None) ->None:
self.stddev_val = state['cur_stddev']
if self.framework == 'tf':
self.stddev.load(self.stddev_val, session=sess)
elif isinstance(self.stddev, float):
self.stddev = self.stddev_val
else:
self.stddev.assign(self.stddev_val)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if TYPE_CHECKING:
from ray.rllib.policy.policy import Policy
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
@PublicAPI
class ParameterNoise(Exploration):
    """An exploration that changes a Model's parameters.

    Implemented based on:
    [1] https://openai.com/research/better-exploration-with-parameter-noise
    [2] https://arxiv.org/pdf/1706.01905.pdf

    At the beginning of an episode, Gaussian noise is added to all weights
    of the model. At the end of the episode, the noise is undone and an action
    diff (pi-delta) is calculated, from which we determine the changes in the
    noise's stddev for the next episode.
    """

    def __init__(
        self,
        action_space,
        *,
        framework: str,
        policy_config: dict,
        model: ModelV2,
        initial_stddev: float = 1.0,
        random_timesteps: int = 10000,
        sub_exploration: Optional[dict] = None,
        **kwargs
    ):
        """Initializes a ParameterNoise Exploration object.

        Args:
            initial_stddev: The initial stddev to use for the noise.
            random_timesteps: The number of timesteps to act completely
                randomly (see [1]).
            sub_exploration: Optional sub-exploration config.
                None for auto-detection/setup.
        """
        assert framework is not None
        super().__init__(
            action_space,
            policy_config=policy_config,
            model=model,
            framework=framework,
            **kwargs
        )
        self.stddev = get_variable(
            initial_stddev, framework=self.framework, tf_name="stddev"
        )
        # Out-of-graph mirror of the current stddev value.
        self.stddev_val = initial_stddev
        # Model weights to perturb; "LayerNorm" variables are excluded from
        # the perturbation.
        self.model_variables = [
            v
            for k, v in self.model.trainable_variables(as_dict=True).items()
            if "LayerNorm" not in k
        ]
        # One Gaussian-noise tensor per perturbed model variable.
        self.noise = []
        for var in self.model_variables:
            name_ = var.name.split(":")[0] + "_noisy" if var.name else ""
            self.noise.append(
                get_variable(
                    np.zeros(var.shape, dtype=np.float32),
                    framework=self.framework,
                    tf_name=name_,
                    torch_tensor=True,
                    device=self.device,
                )
            )
        # Pre-built static-graph ops to sample, add, and remove noise.
        if self.framework == "tf" and not tf.executing_eagerly():
            self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()
            self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()
            self.tf_remove_noise_op = self._tf_remove_noise_op()
            # Convenience op: sample new noise, then add it to the weights.
            with tf1.control_dependencies([self.tf_sample_new_noise_op]):
                add_op = self._tf_add_stored_noise_op()
            with tf1.control_dependencies([add_op]):
                self.tf_sample_new_noise_and_add_op = tf.no_op()
        # Whether the Model's weights currently have noise added or not.
        self.weights_are_currently_noisy = False
        # Auto-detect the underlying exploration to use for the final action
        # pick (depends on the action space).
        if sub_exploration is None:
            if isinstance(self.action_space, Discrete):
                sub_exploration = {
                    "type": "EpsilonGreedy",
                    "epsilon_schedule": {
                        "type": "PiecewiseSchedule",
                        # Step function (see [2]).
                        "endpoints": [
                            (0, 1.0),
                            (random_timesteps + 1, 1.0),
                            (random_timesteps + 2, 0.01),
                        ],
                        "outside_value": 0.01,
                    },
                }
            elif isinstance(self.action_space, Box):
                sub_exploration = {
                    "type": "OrnsteinUhlenbeckNoise",
                    "random_timesteps": random_timesteps,
                }
            else:
                raise NotImplementedError
        self.sub_exploration = from_config(
            Exploration,
            sub_exploration,
            framework=self.framework,
            action_space=self.action_space,
            policy_config=self.policy_config,
            model=self.model,
            **kwargs
        )
        # If True, the first forward pass of the new episode still has to
        # run `self._delayed_on_episode_start`.
        self.episode_started = False

    @override(Exploration)
    def before_compute_actions(
        self,
        *,
        timestep: Optional[int] = None,
        explore: Optional[bool] = None,
        tf_sess: Optional["tf.Session"] = None
    ):
        """Syncs the weights' noise state with the desired `explore` mode."""
        explore = explore if explore is not None else self.policy_config["explore"]
        # First forward pass in a new episode: re-sample noise now.
        if self.episode_started:
            self._delayed_on_episode_start(explore, tf_sess)
        # Add or remove noise, depending on the explore setting.
        if explore and not self.weights_are_currently_noisy:
            self._add_stored_noise(tf_sess=tf_sess)
        elif not explore and self.weights_are_currently_noisy:
            self._remove_noise(tf_sess=tf_sess)

    @override(Exploration)
    def get_exploration_action(
        self,
        *,
        action_distribution: ActionDistribution,
        timestep: Union[TensorType, int],
        explore: Union[TensorType, bool]
    ):
        """Delegates the final action pick to `self.sub_exploration`."""
        return self.sub_exploration.get_exploration_action(
            action_distribution=action_distribution,
            timestep=timestep,
            explore=explore,
        )

    @override(Exploration)
    def on_episode_start(
        self,
        policy: "Policy",
        *,
        environment: BaseEnv = None,
        episode: int = None,
        tf_sess: Optional["tf.Session"] = None
    ):
        """Marks the episode as started; noise handling happens lazily.

        The noise-adding step is delayed by one forward call because the
        optimizer steps right after the episode reset; we don't want to
        update into a noisy net.
        """
        self.episode_started = True

    def _delayed_on_episode_start(self, explore, tf_sess):
        """Performs the noise (re)sampling deferred from `on_episode_start`."""
        # Sample fresh noise and add it to the weights (if exploring) ...
        if explore:
            self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
        # ... otherwise only sample (don't touch the weights).
        else:
            self._sample_new_noise(tf_sess=tf_sess)
        self.episode_started = False

    @override(Exploration)
    def on_episode_end(self, policy, *, environment=None, episode=None, tf_sess=None):
        """Restores noise-free weights at the end of the episode."""
        if self.weights_are_currently_noisy:
            self._remove_noise(tf_sess=tf_sess)

    @override(Exploration)
    def postprocess_trajectory(
        self,
        policy: "Policy",
        sample_batch: SampleBatch,
        tf_sess: Optional["tf.Session"] = None,
    ):
        """Adjusts the noise stddev based on the pi-distance (see [1]).

        Computes the action distribution with and without weight noise,
        measures their distance (KL for categorical, RMSE for deterministic)
        and grows/shrinks `self.stddev` accordingly.
        """
        noisy_action_dist = noise_free_action_dist = None
        # Forward pass under the weights' current (noisy or not) state.
        _, _, fetches = policy.compute_actions_from_input_dict(
            input_dict=sample_batch, explore=self.weights_are_currently_noisy
        )
        # Categorical case (e.g. DQN).
        if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
            action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
        # Deterministic (Gaussian actions, e.g. DDPG).
        elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
            action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
        else:
            raise NotImplementedError
        if self.weights_are_currently_noisy:
            noisy_action_dist = action_dist
        else:
            noise_free_action_dist = action_dist
        # Second forward pass with the noise state flipped.
        _, _, fetches = policy.compute_actions_from_input_dict(
            input_dict=sample_batch, explore=not self.weights_are_currently_noisy
        )
        if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
            action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
        elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
            action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
        if noisy_action_dist is None:
            noisy_action_dist = action_dist
        else:
            noise_free_action_dist = action_dist
        delta = distance = None
        # Categorical: KL-divergence DKL(clean||noisy), per [2].
        if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
            distance = np.nanmean(
                np.sum(
                    noise_free_action_dist
                    * np.log(
                        noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
                    ),
                    1,
                )
            )
            current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
                "cur_epsilon"
            ]
            delta = -np.log(
                1 - current_epsilon + current_epsilon / self.action_space.n
            )
        # Deterministic: RMSE between noisy and noise-free outputs, per [2].
        elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
            distance = np.sqrt(
                np.mean(np.square(noise_free_action_dist - noisy_action_dist))
            )
            current_scale = self.sub_exploration.get_state(sess=tf_sess)["cur_scale"]
            delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * current_scale
        # Grow stddev while the action diff stays small, shrink otherwise.
        if distance <= delta:
            self.stddev_val *= 1.01
        else:
            self.stddev_val /= 1.01
        # Sync the new stddev into the backing variable.
        self.set_state(self.get_state(), sess=tf_sess)
        return sample_batch

    def _sample_new_noise(self, *, tf_sess=None):
        """Samples new noise and stores it in `self.noise`.

        Args:
            tf_sess: The tf-session to use (static-graph tf only).
        """
        if self.framework == "tf":
            tf_sess.run(self.tf_sample_new_noise_op)
        elif self.framework == "tf2":
            self._tf_sample_new_noise_op()
        else:
            # Torch: draw a fresh Gaussian tensor per noise slot.
            for i in range(len(self.noise)):
                self.noise[i] = torch.normal(
                    mean=torch.zeros(self.noise[i].size()), std=self.stddev
                ).to(self.device)

    def _tf_sample_new_noise_op(self):
        """Returns a tf-op that (re)samples all noise variables.

        Also used by tf-eager (building the op executes it).
        """
        added_noises = []
        for noise in self.noise:
            added_noises.append(
                tf1.assign(
                    noise,
                    tf.random.normal(
                        shape=noise.shape, stddev=self.stddev, dtype=tf.float32
                    ),
                )
            )
        return tf.group(*added_noises)

    def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
        """Samples new noise and adds it to the weights in one step.

        Args:
            tf_sess: The tf-session to use (static-graph tf only).
            override: If True, remove any currently applied noise first.
        """
        if self.framework == "tf":
            if override and self.weights_are_currently_noisy:
                tf_sess.run(self.tf_remove_noise_op)
            tf_sess.run(self.tf_sample_new_noise_and_add_op)
        else:
            if override and self.weights_are_currently_noisy:
                self._remove_noise()
            self._sample_new_noise()
            self._add_stored_noise()
        self.weights_are_currently_noisy = True

    def _add_stored_noise(self, *, tf_sess=None):
        """Adds the stored `self.noise` to the model's parameters.

        Note: No new sampling of noise here.

        Args:
            tf_sess (Optional[tf.Session]): The tf-session to use to add the
                stored noise to the (currently noise-free) weights.
        """
        # Only ever add noise to currently noise-free weights.
        assert self.weights_are_currently_noisy is False
        if self.framework == "tf":
            tf_sess.run(self.tf_add_stored_noise_op)
        elif self.framework == "tf2":
            self._tf_add_stored_noise_op()
        else:
            # Torch: perturb in-place, outside of autograd tracking.
            for var, noise in zip(self.model_variables, self.noise):
                var.requires_grad = False
                var.add_(noise)
                var.requires_grad = True
        self.weights_are_currently_noisy = True

    def _tf_add_stored_noise_op(self):
        """Generates tf-op that assigns the stored noise to weights.

        Also used by tf-eager.

        Returns:
            tf.op: The tf op to apply the already stored noise to the NN.
        """
        add_noise_ops = list()
        for var, noise in zip(self.model_variables, self.noise):
            add_noise_ops.append(tf1.assign_add(var, noise))
        ret = tf.group(*tuple(add_noise_ops))
        with tf1.control_dependencies([ret]):
            return tf.no_op()

    def _remove_noise(self, *, tf_sess=None):
        """Removes the current noise from the model parameters.

        Args:
            tf_sess (Optional[tf.Session]): The tf-session to use to remove
                the noise from the (currently noisy) weights.
        """
        # Only valid while the weights actually carry the noise.
        assert self.weights_are_currently_noisy is True
        if self.framework == "tf":
            tf_sess.run(self.tf_remove_noise_op)
        elif self.framework == "tf2":
            self._tf_remove_noise_op()
        else:
            # Torch: undo the perturbation in-place, outside of autograd.
            for var, noise in zip(self.model_variables, self.noise):
                var.requires_grad = False
                var.add_(-noise)
                var.requires_grad = True
        self.weights_are_currently_noisy = False

    def _tf_remove_noise_op(self):
        """Generates a tf-op for removing noise from the model's weights.

        Also used by tf-eager.

        Returns:
            tf.op: The tf op to remove the currently stored noise from the NN.
        """
        remove_noise_ops = list()
        for var, noise in zip(self.model_variables, self.noise):
            remove_noise_ops.append(tf1.assign_add(var, -noise))
        ret = tf.group(*tuple(remove_noise_ops))
        with tf1.control_dependencies([ret]):
            return tf.no_op()

    @override(Exploration)
    def get_state(self, sess=None):
        """Returns the current stddev value as this exploration's state."""
        return {"cur_stddev": self.stddev_val}

    @override(Exploration)
    def set_state(self, state: dict, sess: Optional["tf.Session"] = None) -> None:
        """Sets the stddev from `state` and syncs the backing variable."""
        self.stddev_val = state["cur_stddev"]
        if self.framework == "tf":
            # Static-graph tf: load into the variable via the session.
            self.stddev.load(self.stddev_val, session=sess)
        elif isinstance(self.stddev, float):
            self.stddev = self.stddev_val
        else:
            # Eager tf / torch variable.
            self.stddev.assign(self.stddev_val)
<|reserved_special_token_1|>
from gymnasium.spaces import Box, Discrete
import numpy as np
from typing import Optional, TYPE_CHECKING, Union
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_action_dist import Categorical, Deterministic
from ray.rllib.models.torch.torch_action_dist import TorchCategorical, TorchDeterministic
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.utils.exploration.exploration import Exploration
from ray.rllib.utils.framework import get_variable, try_import_tf, try_import_torch
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.numpy import softmax, SMALL_NUMBER
from ray.rllib.utils.typing import TensorType
if TYPE_CHECKING:
from ray.rllib.policy.policy import Policy
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
@PublicAPI
class ParameterNoise(Exploration):
    """An exploration that changes a Model's parameters.

    Implemented based on:
    [1] https://openai.com/research/better-exploration-with-parameter-noise
    [2] https://arxiv.org/pdf/1706.01905.pdf

    At the beginning of an episode, Gaussian noise is added to all weights
    of the model. At the end of the episode, the noise is undone and an action
    diff (pi-delta) is calculated, from which we determine the changes in the
    noise's stddev for the next episode.
    """

    def __init__(
        self,
        action_space,
        *,
        framework: str,
        policy_config: dict,
        model: ModelV2,
        initial_stddev: float = 1.0,
        random_timesteps: int = 10000,
        sub_exploration: Optional[dict] = None,
        **kwargs
    ):
        """Initializes a ParameterNoise Exploration object.

        Args:
            initial_stddev: The initial stddev to use for the noise.
            random_timesteps: The number of timesteps to act completely
                randomly (see [1]).
            sub_exploration: Optional sub-exploration config.
                None for auto-detection/setup.
        """
        assert framework is not None
        super().__init__(
            action_space,
            policy_config=policy_config,
            model=model,
            framework=framework,
            **kwargs
        )
        self.stddev = get_variable(
            initial_stddev, framework=self.framework, tf_name="stddev"
        )
        # Out-of-graph mirror of the current stddev value.
        self.stddev_val = initial_stddev
        # Model weights to perturb; "LayerNorm" variables are excluded from
        # the perturbation.
        self.model_variables = [
            v
            for k, v in self.model.trainable_variables(as_dict=True).items()
            if "LayerNorm" not in k
        ]
        # One Gaussian-noise tensor per perturbed model variable.
        self.noise = []
        for var in self.model_variables:
            name_ = var.name.split(":")[0] + "_noisy" if var.name else ""
            self.noise.append(
                get_variable(
                    np.zeros(var.shape, dtype=np.float32),
                    framework=self.framework,
                    tf_name=name_,
                    torch_tensor=True,
                    device=self.device,
                )
            )
        # Pre-built static-graph ops to sample, add, and remove noise.
        if self.framework == "tf" and not tf.executing_eagerly():
            self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()
            self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()
            self.tf_remove_noise_op = self._tf_remove_noise_op()
            # Convenience op: sample new noise, then add it to the weights.
            with tf1.control_dependencies([self.tf_sample_new_noise_op]):
                add_op = self._tf_add_stored_noise_op()
            with tf1.control_dependencies([add_op]):
                self.tf_sample_new_noise_and_add_op = tf.no_op()
        # Whether the Model's weights currently have noise added or not.
        self.weights_are_currently_noisy = False
        # Auto-detect the underlying exploration to use for the final action
        # pick (depends on the action space).
        if sub_exploration is None:
            if isinstance(self.action_space, Discrete):
                sub_exploration = {
                    "type": "EpsilonGreedy",
                    "epsilon_schedule": {
                        "type": "PiecewiseSchedule",
                        # Step function (see [2]).
                        "endpoints": [
                            (0, 1.0),
                            (random_timesteps + 1, 1.0),
                            (random_timesteps + 2, 0.01),
                        ],
                        "outside_value": 0.01,
                    },
                }
            elif isinstance(self.action_space, Box):
                sub_exploration = {
                    "type": "OrnsteinUhlenbeckNoise",
                    "random_timesteps": random_timesteps,
                }
            else:
                raise NotImplementedError
        self.sub_exploration = from_config(
            Exploration,
            sub_exploration,
            framework=self.framework,
            action_space=self.action_space,
            policy_config=self.policy_config,
            model=self.model,
            **kwargs
        )
        # If True, the first forward pass of the new episode still has to
        # run `self._delayed_on_episode_start`.
        self.episode_started = False

    @override(Exploration)
    def before_compute_actions(
        self,
        *,
        timestep: Optional[int] = None,
        explore: Optional[bool] = None,
        tf_sess: Optional["tf.Session"] = None
    ):
        """Syncs the weights' noise state with the desired `explore` mode."""
        explore = explore if explore is not None else self.policy_config["explore"]
        # First forward pass in a new episode: re-sample noise now.
        if self.episode_started:
            self._delayed_on_episode_start(explore, tf_sess)
        # Add or remove noise, depending on the explore setting.
        if explore and not self.weights_are_currently_noisy:
            self._add_stored_noise(tf_sess=tf_sess)
        elif not explore and self.weights_are_currently_noisy:
            self._remove_noise(tf_sess=tf_sess)

    @override(Exploration)
    def get_exploration_action(
        self,
        *,
        action_distribution: ActionDistribution,
        timestep: Union[TensorType, int],
        explore: Union[TensorType, bool]
    ):
        """Delegates the final action pick to `self.sub_exploration`."""
        return self.sub_exploration.get_exploration_action(
            action_distribution=action_distribution,
            timestep=timestep,
            explore=explore,
        )

    @override(Exploration)
    def on_episode_start(
        self,
        policy: "Policy",
        *,
        environment: BaseEnv = None,
        episode: int = None,
        tf_sess: Optional["tf.Session"] = None
    ):
        """Marks the episode as started; noise handling happens lazily.

        The noise-adding step is delayed by one forward call because the
        optimizer steps right after the episode reset; we don't want to
        update into a noisy net.
        """
        self.episode_started = True

    def _delayed_on_episode_start(self, explore, tf_sess):
        """Performs the noise (re)sampling deferred from `on_episode_start`."""
        # Sample fresh noise and add it to the weights (if exploring) ...
        if explore:
            self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
        # ... otherwise only sample (don't touch the weights).
        else:
            self._sample_new_noise(tf_sess=tf_sess)
        self.episode_started = False

    @override(Exploration)
    def on_episode_end(self, policy, *, environment=None, episode=None, tf_sess=None):
        """Restores noise-free weights at the end of the episode."""
        if self.weights_are_currently_noisy:
            self._remove_noise(tf_sess=tf_sess)

    @override(Exploration)
    def postprocess_trajectory(
        self,
        policy: "Policy",
        sample_batch: SampleBatch,
        tf_sess: Optional["tf.Session"] = None,
    ):
        """Adjusts the noise stddev based on the pi-distance (see [1]).

        Computes the action distribution with and without weight noise,
        measures their distance (KL for categorical, RMSE for deterministic)
        and grows/shrinks `self.stddev` accordingly.
        """
        noisy_action_dist = noise_free_action_dist = None
        # Forward pass under the weights' current (noisy or not) state.
        _, _, fetches = policy.compute_actions_from_input_dict(
            input_dict=sample_batch, explore=self.weights_are_currently_noisy
        )
        # Categorical case (e.g. DQN).
        if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
            action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
        # Deterministic (Gaussian actions, e.g. DDPG).
        elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
            action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
        else:
            raise NotImplementedError
        if self.weights_are_currently_noisy:
            noisy_action_dist = action_dist
        else:
            noise_free_action_dist = action_dist
        # Second forward pass with the noise state flipped.
        _, _, fetches = policy.compute_actions_from_input_dict(
            input_dict=sample_batch, explore=not self.weights_are_currently_noisy
        )
        if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
            action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
        elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
            action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
        if noisy_action_dist is None:
            noisy_action_dist = action_dist
        else:
            noise_free_action_dist = action_dist
        delta = distance = None
        # Categorical: KL-divergence DKL(clean||noisy), per [2].
        if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
            distance = np.nanmean(
                np.sum(
                    noise_free_action_dist
                    * np.log(
                        noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
                    ),
                    1,
                )
            )
            current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
                "cur_epsilon"
            ]
            delta = -np.log(
                1 - current_epsilon + current_epsilon / self.action_space.n
            )
        # Deterministic: RMSE between noisy and noise-free outputs, per [2].
        elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
            distance = np.sqrt(
                np.mean(np.square(noise_free_action_dist - noisy_action_dist))
            )
            current_scale = self.sub_exploration.get_state(sess=tf_sess)["cur_scale"]
            delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * current_scale
        # Grow stddev while the action diff stays small, shrink otherwise.
        if distance <= delta:
            self.stddev_val *= 1.01
        else:
            self.stddev_val /= 1.01
        # Sync the new stddev into the backing variable.
        self.set_state(self.get_state(), sess=tf_sess)
        return sample_batch

    def _sample_new_noise(self, *, tf_sess=None):
        """Samples new noise and stores it in `self.noise`.

        Args:
            tf_sess: The tf-session to use (static-graph tf only).
        """
        if self.framework == "tf":
            tf_sess.run(self.tf_sample_new_noise_op)
        elif self.framework == "tf2":
            self._tf_sample_new_noise_op()
        else:
            # Torch: draw a fresh Gaussian tensor per noise slot.
            for i in range(len(self.noise)):
                self.noise[i] = torch.normal(
                    mean=torch.zeros(self.noise[i].size()), std=self.stddev
                ).to(self.device)

    def _tf_sample_new_noise_op(self):
        """Returns a tf-op that (re)samples all noise variables.

        Also used by tf-eager (building the op executes it).
        """
        added_noises = []
        for noise in self.noise:
            added_noises.append(
                tf1.assign(
                    noise,
                    tf.random.normal(
                        shape=noise.shape, stddev=self.stddev, dtype=tf.float32
                    ),
                )
            )
        return tf.group(*added_noises)

    def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
        """Samples new noise and adds it to the weights in one step.

        Args:
            tf_sess: The tf-session to use (static-graph tf only).
            override: If True, remove any currently applied noise first.
        """
        if self.framework == "tf":
            if override and self.weights_are_currently_noisy:
                tf_sess.run(self.tf_remove_noise_op)
            tf_sess.run(self.tf_sample_new_noise_and_add_op)
        else:
            if override and self.weights_are_currently_noisy:
                self._remove_noise()
            self._sample_new_noise()
            self._add_stored_noise()
        self.weights_are_currently_noisy = True

    def _add_stored_noise(self, *, tf_sess=None):
        """Adds the stored `self.noise` to the model's parameters.

        Note: No new sampling of noise here.

        Args:
            tf_sess (Optional[tf.Session]): The tf-session to use to add the
                stored noise to the (currently noise-free) weights.
        """
        # Only ever add noise to currently noise-free weights.
        assert self.weights_are_currently_noisy is False
        if self.framework == "tf":
            tf_sess.run(self.tf_add_stored_noise_op)
        elif self.framework == "tf2":
            self._tf_add_stored_noise_op()
        else:
            # Torch: perturb in-place, outside of autograd tracking.
            for var, noise in zip(self.model_variables, self.noise):
                var.requires_grad = False
                var.add_(noise)
                var.requires_grad = True
        self.weights_are_currently_noisy = True

    def _tf_add_stored_noise_op(self):
        """Generates tf-op that assigns the stored noise to weights.

        Also used by tf-eager.

        Returns:
            tf.op: The tf op to apply the already stored noise to the NN.
        """
        add_noise_ops = list()
        for var, noise in zip(self.model_variables, self.noise):
            add_noise_ops.append(tf1.assign_add(var, noise))
        ret = tf.group(*tuple(add_noise_ops))
        with tf1.control_dependencies([ret]):
            return tf.no_op()

    def _remove_noise(self, *, tf_sess=None):
        """Removes the current noise from the model parameters.

        Args:
            tf_sess (Optional[tf.Session]): The tf-session to use to remove
                the noise from the (currently noisy) weights.
        """
        # Only valid while the weights actually carry the noise.
        assert self.weights_are_currently_noisy is True
        if self.framework == "tf":
            tf_sess.run(self.tf_remove_noise_op)
        elif self.framework == "tf2":
            self._tf_remove_noise_op()
        else:
            # Torch: undo the perturbation in-place, outside of autograd.
            for var, noise in zip(self.model_variables, self.noise):
                var.requires_grad = False
                var.add_(-noise)
                var.requires_grad = True
        self.weights_are_currently_noisy = False

    def _tf_remove_noise_op(self):
        """Generates a tf-op for removing noise from the model's weights.

        Also used by tf-eager.

        Returns:
            tf.op: The tf op to remove the currently stored noise from the NN.
        """
        remove_noise_ops = list()
        for var, noise in zip(self.model_variables, self.noise):
            remove_noise_ops.append(tf1.assign_add(var, -noise))
        ret = tf.group(*tuple(remove_noise_ops))
        with tf1.control_dependencies([ret]):
            return tf.no_op()

    @override(Exploration)
    def get_state(self, sess=None):
        """Returns the current stddev value as this exploration's state."""
        return {"cur_stddev": self.stddev_val}

    @override(Exploration)
    def set_state(self, state: dict, sess: Optional["tf.Session"] = None) -> None:
        """Sets the stddev from `state` and syncs the backing variable."""
        self.stddev_val = state["cur_stddev"]
        if self.framework == "tf":
            # Static-graph tf: load into the variable via the session.
            self.stddev.load(self.stddev_val, session=sess)
        elif isinstance(self.stddev, float):
            self.stddev = self.stddev_val
        else:
            # Eager tf / torch variable.
            self.stddev.assign(self.stddev_val)
<|reserved_special_token_1|>
from gymnasium.spaces import Box, Discrete
import numpy as np
from typing import Optional, TYPE_CHECKING, Union
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_action_dist import Categorical, Deterministic
from ray.rllib.models.torch.torch_action_dist import (
TorchCategorical,
TorchDeterministic,
)
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.utils.exploration.exploration import Exploration
from ray.rllib.utils.framework import get_variable, try_import_tf, try_import_torch
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.numpy import softmax, SMALL_NUMBER
from ray.rllib.utils.typing import TensorType
if TYPE_CHECKING:
from ray.rllib.policy.policy import Policy
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
@PublicAPI
class ParameterNoise(Exploration):
"""An exploration that changes a Model's parameters.
Implemented based on:
[1] https://openai.com/research/better-exploration-with-parameter-noise
[2] https://arxiv.org/pdf/1706.01905.pdf
At the beginning of an episode, Gaussian noise is added to all weights
of the model. At the end of the episode, the noise is undone and an action
diff (pi-delta) is calculated, from which we determine the changes in the
noise's stddev for the next episode.
"""
    def __init__(
        self,
        action_space,
        *,
        framework: str,
        policy_config: dict,
        model: ModelV2,
        initial_stddev: float = 1.0,
        random_timesteps: int = 10000,
        sub_exploration: Optional[dict] = None,
        **kwargs
    ):
        """Initializes a ParameterNoise Exploration object.

        Args:
            initial_stddev: The initial stddev to use for the noise.
            random_timesteps: The number of timesteps to act completely
                randomly (see [1]).
            sub_exploration: Optional sub-exploration config.
                None for auto-detection/setup.
        """
        assert framework is not None
        super().__init__(
            action_space,
            policy_config=policy_config,
            model=model,
            framework=framework,
            **kwargs
        )
        self.stddev = get_variable(
            initial_stddev, framework=self.framework, tf_name="stddev"
        )
        self.stddev_val = initial_stddev  # Out-of-graph tf value holder.
        # The weight variables of the Model where noise should be applied to.
        # This excludes any variable, whose name contains "LayerNorm" (those
        # are BatchNormalization layers, which should not be perturbed).
        self.model_variables = [
            v
            for k, v in self.model.trainable_variables(as_dict=True).items()
            if "LayerNorm" not in k
        ]
        # Our noise to be added to the weights. Each item in `self.noise`
        # corresponds to one Model variable and holding the Gaussian noise to
        # be added to that variable (weight).
        self.noise = []
        for var in self.model_variables:
            # NOTE(review): torch params may have no `.name`; then tf_name is
            # "" — presumably get_variable tolerates this. Confirm.
            name_ = var.name.split(":")[0] + "_noisy" if var.name else ""
            self.noise.append(
                get_variable(
                    np.zeros(var.shape, dtype=np.float32),
                    framework=self.framework,
                    tf_name=name_,
                    torch_tensor=True,
                    device=self.device,
                )
            )
        # tf-specific ops to sample, assign and remove noise.
        if self.framework == "tf" and not tf.executing_eagerly():
            self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()
            self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()
            self.tf_remove_noise_op = self._tf_remove_noise_op()
            # Create convenience sample+add op for tf.
            with tf1.control_dependencies([self.tf_sample_new_noise_op]):
                add_op = self._tf_add_stored_noise_op()
            with tf1.control_dependencies([add_op]):
                self.tf_sample_new_noise_and_add_op = tf.no_op()
        # Whether the Model's weights currently have noise added or not.
        self.weights_are_currently_noisy = False
        # Auto-detection of underlying exploration functionality.
        if sub_exploration is None:
            # For discrete action spaces, use an underlying EpsilonGreedy with
            # a special schedule.
            if isinstance(self.action_space, Discrete):
                sub_exploration = {
                    "type": "EpsilonGreedy",
                    "epsilon_schedule": {
                        "type": "PiecewiseSchedule",
                        # Step function (see [2]).
                        "endpoints": [
                            (0, 1.0),
                            (random_timesteps + 1, 1.0),
                            (random_timesteps + 2, 0.01),
                        ],
                        "outside_value": 0.01,
                    },
                }
            elif isinstance(self.action_space, Box):
                sub_exploration = {
                    "type": "OrnsteinUhlenbeckNoise",
                    "random_timesteps": random_timesteps,
                }
            # TODO(sven): Implement for any action space.
            else:
                raise NotImplementedError
        self.sub_exploration = from_config(
            Exploration,
            sub_exploration,
            framework=self.framework,
            action_space=self.action_space,
            policy_config=self.policy_config,
            model=self.model,
            **kwargs
        )
        # Whether we need to call `self._delayed_on_episode_start` before
        # the forward pass.
        self.episode_started = False
    @override(Exploration)
    def before_compute_actions(
        self,
        *,
        timestep: Optional[int] = None,
        explore: Optional[bool] = None,
        tf_sess: Optional["tf.Session"] = None
    ):
        """Syncs the weights' noise state with the desired `explore` mode."""
        explore = explore if explore is not None else self.policy_config["explore"]
        # Is this the first forward pass in the new episode? If yes, do the
        # noise re-sampling and add to weights.
        if self.episode_started:
            self._delayed_on_episode_start(explore, tf_sess)
        # Add noise if necessary.
        if explore and not self.weights_are_currently_noisy:
            self._add_stored_noise(tf_sess=tf_sess)
        # Remove noise if necessary.
        elif not explore and self.weights_are_currently_noisy:
            self._remove_noise(tf_sess=tf_sess)
    @override(Exploration)
    def get_exploration_action(
        self,
        *,
        action_distribution: ActionDistribution,
        timestep: Union[TensorType, int],
        explore: Union[TensorType, bool]
    ):
        """Delegates the actual action sampling to `self.sub_exploration`."""
        # Use our sub-exploration object to handle the final exploration
        # action (depends on the algo-type/action-space/etc..).
        return self.sub_exploration.get_exploration_action(
            action_distribution=action_distribution, timestep=timestep, explore=explore
        )
    @override(Exploration)
    def on_episode_start(
        self,
        policy: "Policy",
        *,
        environment: BaseEnv = None,
        episode: int = None,
        tf_sess: Optional["tf.Session"] = None
    ):
        """Marks the episode as started; noise handling happens lazily."""
        # We have to delay the noise-adding step by one forward call.
        # This is due to the fact that the optimizer does it's step right
        # after the episode was reset (and hence the noise was already added!).
        # We don't want to update into a noisy net.
        self.episode_started = True
    def _delayed_on_episode_start(self, explore, tf_sess):
        """Performs the noise (re)sampling deferred from `on_episode_start`."""
        # Sample fresh noise and add to weights.
        if explore:
            self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
        # Only sample, don't apply anything to the weights.
        else:
            self._sample_new_noise(tf_sess=tf_sess)
        self.episode_started = False
    @override(Exploration)
    def on_episode_end(self, policy, *, environment=None, episode=None, tf_sess=None):
        """Restores noise-free weights at the end of the episode."""
        # Remove stored noise from weights (only if currently noisy).
        if self.weights_are_currently_noisy:
            self._remove_noise(tf_sess=tf_sess)
    @override(Exploration)
    def postprocess_trajectory(
        self,
        policy: "Policy",
        sample_batch: SampleBatch,
        tf_sess: Optional["tf.Session"] = None,
    ):
        """Adjusts `self.stddev` based on the noisy/noise-free action diff.

        Computes the policy's action distribution with and without the weight
        noise, measures their distance (KL for categorical, RMSE for
        deterministic) and grows/shrinks the noise stddev accordingly.
        """
        noisy_action_dist = noise_free_action_dist = None
        # Adjust the stddev depending on the action (pi)-distance.
        # Also see [1] for details.
        # TODO(sven): Find out whether this can be scrapped by simply using
        # the `sample_batch` to get the noisy/noise-free action dist.
        _, _, fetches = policy.compute_actions_from_input_dict(
            input_dict=sample_batch, explore=self.weights_are_currently_noisy
        )
        # Categorical case (e.g. DQN).
        if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
            action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
        # Deterministic (Gaussian actions, e.g. DDPG).
        elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
            action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
        else:
            raise NotImplementedError  # TODO(sven): Other action-dist cases.
        if self.weights_are_currently_noisy:
            noisy_action_dist = action_dist
        else:
            noise_free_action_dist = action_dist
        # Second forward pass with the noise state flipped.
        _, _, fetches = policy.compute_actions_from_input_dict(
            input_dict=sample_batch, explore=not self.weights_are_currently_noisy
        )
        # Categorical case (e.g. DQN).
        if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
            action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
        # Deterministic (Gaussian actions, e.g. DDPG).
        elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
            action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
        if noisy_action_dist is None:
            noisy_action_dist = action_dist
        else:
            noise_free_action_dist = action_dist
        delta = distance = None
        # Categorical case (e.g. DQN).
        if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
            # Calculate KL-divergence (DKL(clean||noisy)) according to [2].
            # TODO(sven): Allow KL-divergence to be calculated by our
            # Distribution classes (don't support off-graph/numpy yet).
            distance = np.nanmean(
                np.sum(
                    noise_free_action_dist
                    * np.log(
                        noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
                    ),
                    1,
                )
            )
            current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
                "cur_epsilon"
            ]
            delta = -np.log(1 - current_epsilon + current_epsilon / self.action_space.n)
        elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
            # Calculate MSE between noisy and non-noisy output (see [2]).
            distance = np.sqrt(
                np.mean(np.square(noise_free_action_dist - noisy_action_dist))
            )
            current_scale = self.sub_exploration.get_state(sess=tf_sess)["cur_scale"]
            delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * current_scale
        # Adjust stddev according to the calculated action-distance.
        if distance <= delta:
            self.stddev_val *= 1.01
        else:
            self.stddev_val /= 1.01
        # Update our state (self.stddev and self.stddev_val).
        self.set_state(self.get_state(), sess=tf_sess)
        return sample_batch
def _sample_new_noise(self, *, tf_sess=None):
"""Samples new noise and stores it in `self.noise`."""
if self.framework == "tf":
tf_sess.run(self.tf_sample_new_noise_op)
elif self.framework == "tf2":
self._tf_sample_new_noise_op()
else:
for i in range(len(self.noise)):
self.noise[i] = torch.normal(
mean=torch.zeros(self.noise[i].size()), std=self.stddev
).to(self.device)
def _tf_sample_new_noise_op(self):
added_noises = []
for noise in self.noise:
added_noises.append(
tf1.assign(
noise,
tf.random.normal(
shape=noise.shape, stddev=self.stddev, dtype=tf.float32
),
)
)
return tf.group(*added_noises)
def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
if self.framework == "tf":
if override and self.weights_are_currently_noisy:
tf_sess.run(self.tf_remove_noise_op)
tf_sess.run(self.tf_sample_new_noise_and_add_op)
else:
if override and self.weights_are_currently_noisy:
self._remove_noise()
self._sample_new_noise()
self._add_stored_noise()
self.weights_are_currently_noisy = True
def _add_stored_noise(self, *, tf_sess=None):
"""Adds the stored `self.noise` to the model's parameters.
Note: No new sampling of noise here.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to add the
stored noise to the (currently noise-free) weights.
override: If True, undo any currently applied noise first,
then add the currently stored noise.
"""
# Make sure we only add noise to currently noise-free weights.
assert self.weights_are_currently_noisy is False
# Add stored noise to the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_add_stored_noise_op)
elif self.framework == "tf2":
self._tf_add_stored_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Add noise to weights in-place.
var.requires_grad = False
var.add_(noise)
var.requires_grad = True
self.weights_are_currently_noisy = True
def _tf_add_stored_noise_op(self):
"""Generates tf-op that assigns the stored noise to weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to apply the already stored noise to the NN.
"""
add_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
add_noise_ops.append(tf1.assign_add(var, noise))
ret = tf.group(*tuple(add_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
def _remove_noise(self, *, tf_sess=None):
"""
Removes the current action noise from the model parameters.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to remove
the noise from the (currently noisy) weights.
"""
# Make sure we only remove noise iff currently noisy.
assert self.weights_are_currently_noisy is True
# Removes the stored noise from the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_remove_noise_op)
elif self.framework == "tf2":
self._tf_remove_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Remove noise from weights in-place.
var.requires_grad = False
var.add_(-noise)
var.requires_grad = True
self.weights_are_currently_noisy = False
def _tf_remove_noise_op(self):
"""Generates a tf-op for removing noise from the model's weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to remve the currently stored noise from the NN.
"""
remove_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
remove_noise_ops.append(tf1.assign_add(var, -noise))
ret = tf.group(*tuple(remove_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
@override(Exploration)
def get_state(self, sess=None):
return {"cur_stddev": self.stddev_val}
@override(Exploration)
def set_state(self, state: dict, sess: Optional["tf.Session"] = None) -> None:
self.stddev_val = state["cur_stddev"]
# Set self.stddev to calculated value.
if self.framework == "tf":
self.stddev.load(self.stddev_val, session=sess)
elif isinstance(self.stddev, float):
self.stddev = self.stddev_val
else:
self.stddev.assign(self.stddev_val)
|
flexible
|
{
"blob_id": "b2b47b394eadebda5c51e89abd27832f9dbd4c8c",
"index": 4193,
"step-1": "<mask token>\n\n\n@PublicAPI\nclass ParameterNoise(Exploration):\n <mask token>\n\n def __init__(self, action_space, *, framework: str, policy_config: dict,\n model: ModelV2, initial_stddev: float=1.0, random_timesteps: int=\n 10000, sub_exploration: Optional[dict]=None, **kwargs):\n \"\"\"Initializes a ParameterNoise Exploration object.\n\n Args:\n initial_stddev: The initial stddev to use for the noise.\n random_timesteps: The number of timesteps to act completely\n randomly (see [1]).\n sub_exploration: Optional sub-exploration config.\n None for auto-detection/setup.\n \"\"\"\n assert framework is not None\n super().__init__(action_space, policy_config=policy_config, model=\n model, framework=framework, **kwargs)\n self.stddev = get_variable(initial_stddev, framework=self.framework,\n tf_name='stddev')\n self.stddev_val = initial_stddev\n self.model_variables = [v for k, v in self.model.\n trainable_variables(as_dict=True).items() if 'LayerNorm' not in k]\n self.noise = []\n for var in self.model_variables:\n name_ = var.name.split(':')[0] + '_noisy' if var.name else ''\n self.noise.append(get_variable(np.zeros(var.shape, dtype=np.\n float32), framework=self.framework, tf_name=name_,\n torch_tensor=True, device=self.device))\n if self.framework == 'tf' and not tf.executing_eagerly():\n self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()\n self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()\n self.tf_remove_noise_op = self._tf_remove_noise_op()\n with tf1.control_dependencies([self.tf_sample_new_noise_op]):\n add_op = self._tf_add_stored_noise_op()\n with tf1.control_dependencies([add_op]):\n self.tf_sample_new_noise_and_add_op = tf.no_op()\n self.weights_are_currently_noisy = False\n if sub_exploration is None:\n if isinstance(self.action_space, Discrete):\n sub_exploration = {'type': 'EpsilonGreedy',\n 'epsilon_schedule': {'type': 'PiecewiseSchedule',\n 'endpoints': [(0, 1.0), (random_timesteps + 1, 1.0), (\n random_timesteps + 2, 
0.01)], 'outside_value': 0.01}}\n elif isinstance(self.action_space, Box):\n sub_exploration = {'type': 'OrnsteinUhlenbeckNoise',\n 'random_timesteps': random_timesteps}\n else:\n raise NotImplementedError\n self.sub_exploration = from_config(Exploration, sub_exploration,\n framework=self.framework, action_space=self.action_space,\n policy_config=self.policy_config, model=self.model, **kwargs)\n self.episode_started = False\n\n @override(Exploration)\n def before_compute_actions(self, *, timestep: Optional[int]=None,\n explore: Optional[bool]=None, tf_sess: Optional['tf.Session']=None):\n explore = explore if explore is not None else self.policy_config[\n 'explore']\n if self.episode_started:\n self._delayed_on_episode_start(explore, tf_sess)\n if explore and not self.weights_are_currently_noisy:\n self._add_stored_noise(tf_sess=tf_sess)\n elif not explore and self.weights_are_currently_noisy:\n self._remove_noise(tf_sess=tf_sess)\n\n @override(Exploration)\n def get_exploration_action(self, *, action_distribution:\n ActionDistribution, timestep: Union[TensorType, int], explore:\n Union[TensorType, bool]):\n return self.sub_exploration.get_exploration_action(action_distribution\n =action_distribution, timestep=timestep, explore=explore)\n\n @override(Exploration)\n def on_episode_start(self, policy: 'Policy', *, environment: BaseEnv=\n None, episode: int=None, tf_sess: Optional['tf.Session']=None):\n self.episode_started = True\n\n def _delayed_on_episode_start(self, explore, tf_sess):\n if explore:\n self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)\n else:\n self._sample_new_noise(tf_sess=tf_sess)\n self.episode_started = False\n\n @override(Exploration)\n def on_episode_end(self, policy, *, environment=None, episode=None,\n tf_sess=None):\n if self.weights_are_currently_noisy:\n self._remove_noise(tf_sess=tf_sess)\n\n @override(Exploration)\n def postprocess_trajectory(self, policy: 'Policy', sample_batch:\n SampleBatch, tf_sess: 
Optional['tf.Session']=None):\n noisy_action_dist = noise_free_action_dist = None\n _, _, fetches = policy.compute_actions_from_input_dict(input_dict=\n sample_batch, explore=self.weights_are_currently_noisy)\n if issubclass(policy.dist_class, (Categorical, TorchCategorical)):\n action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])\n elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)\n ):\n action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]\n else:\n raise NotImplementedError\n if self.weights_are_currently_noisy:\n noisy_action_dist = action_dist\n else:\n noise_free_action_dist = action_dist\n _, _, fetches = policy.compute_actions_from_input_dict(input_dict=\n sample_batch, explore=not self.weights_are_currently_noisy)\n if issubclass(policy.dist_class, (Categorical, TorchCategorical)):\n action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])\n elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)\n ):\n action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]\n if noisy_action_dist is None:\n noisy_action_dist = action_dist\n else:\n noise_free_action_dist = action_dist\n delta = distance = None\n if issubclass(policy.dist_class, (Categorical, TorchCategorical)):\n distance = np.nanmean(np.sum(noise_free_action_dist * np.log(\n noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)\n ), 1))\n current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[\n 'cur_epsilon']\n delta = -np.log(1 - current_epsilon + current_epsilon / self.\n action_space.n)\n elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)\n ):\n distance = np.sqrt(np.mean(np.square(noise_free_action_dist -\n noisy_action_dist)))\n current_scale = self.sub_exploration.get_state(sess=tf_sess)[\n 'cur_scale']\n delta = getattr(self.sub_exploration, 'ou_sigma', 0.2\n ) * current_scale\n if distance <= delta:\n self.stddev_val *= 1.01\n else:\n self.stddev_val /= 1.01\n self.set_state(self.get_state(), sess=tf_sess)\n 
return sample_batch\n\n def _sample_new_noise(self, *, tf_sess=None):\n \"\"\"Samples new noise and stores it in `self.noise`.\"\"\"\n if self.framework == 'tf':\n tf_sess.run(self.tf_sample_new_noise_op)\n elif self.framework == 'tf2':\n self._tf_sample_new_noise_op()\n else:\n for i in range(len(self.noise)):\n self.noise[i] = torch.normal(mean=torch.zeros(self.noise[i]\n .size()), std=self.stddev).to(self.device)\n\n def _tf_sample_new_noise_op(self):\n added_noises = []\n for noise in self.noise:\n added_noises.append(tf1.assign(noise, tf.random.normal(shape=\n noise.shape, stddev=self.stddev, dtype=tf.float32)))\n return tf.group(*added_noises)\n\n def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):\n if self.framework == 'tf':\n if override and self.weights_are_currently_noisy:\n tf_sess.run(self.tf_remove_noise_op)\n tf_sess.run(self.tf_sample_new_noise_and_add_op)\n else:\n if override and self.weights_are_currently_noisy:\n self._remove_noise()\n self._sample_new_noise()\n self._add_stored_noise()\n self.weights_are_currently_noisy = True\n\n def _add_stored_noise(self, *, tf_sess=None):\n \"\"\"Adds the stored `self.noise` to the model's parameters.\n\n Note: No new sampling of noise here.\n\n Args:\n tf_sess (Optional[tf.Session]): The tf-session to use to add the\n stored noise to the (currently noise-free) weights.\n override: If True, undo any currently applied noise first,\n then add the currently stored noise.\n \"\"\"\n assert self.weights_are_currently_noisy is False\n if self.framework == 'tf':\n tf_sess.run(self.tf_add_stored_noise_op)\n elif self.framework == 'tf2':\n self._tf_add_stored_noise_op()\n else:\n for var, noise in zip(self.model_variables, self.noise):\n var.requires_grad = False\n var.add_(noise)\n var.requires_grad = True\n self.weights_are_currently_noisy = True\n\n def _tf_add_stored_noise_op(self):\n \"\"\"Generates tf-op that assigns the stored noise to weights.\n\n Also used by tf-eager.\n\n Returns:\n 
tf.op: The tf op to apply the already stored noise to the NN.\n \"\"\"\n add_noise_ops = list()\n for var, noise in zip(self.model_variables, self.noise):\n add_noise_ops.append(tf1.assign_add(var, noise))\n ret = tf.group(*tuple(add_noise_ops))\n with tf1.control_dependencies([ret]):\n return tf.no_op()\n\n def _remove_noise(self, *, tf_sess=None):\n \"\"\"\n Removes the current action noise from the model parameters.\n\n Args:\n tf_sess (Optional[tf.Session]): The tf-session to use to remove\n the noise from the (currently noisy) weights.\n \"\"\"\n assert self.weights_are_currently_noisy is True\n if self.framework == 'tf':\n tf_sess.run(self.tf_remove_noise_op)\n elif self.framework == 'tf2':\n self._tf_remove_noise_op()\n else:\n for var, noise in zip(self.model_variables, self.noise):\n var.requires_grad = False\n var.add_(-noise)\n var.requires_grad = True\n self.weights_are_currently_noisy = False\n\n def _tf_remove_noise_op(self):\n \"\"\"Generates a tf-op for removing noise from the model's weights.\n\n Also used by tf-eager.\n\n Returns:\n tf.op: The tf op to remve the currently stored noise from the NN.\n \"\"\"\n remove_noise_ops = list()\n for var, noise in zip(self.model_variables, self.noise):\n remove_noise_ops.append(tf1.assign_add(var, -noise))\n ret = tf.group(*tuple(remove_noise_ops))\n with tf1.control_dependencies([ret]):\n return tf.no_op()\n\n @override(Exploration)\n def get_state(self, sess=None):\n return {'cur_stddev': self.stddev_val}\n <mask token>\n",
"step-2": "<mask token>\n\n\n@PublicAPI\nclass ParameterNoise(Exploration):\n <mask token>\n\n def __init__(self, action_space, *, framework: str, policy_config: dict,\n model: ModelV2, initial_stddev: float=1.0, random_timesteps: int=\n 10000, sub_exploration: Optional[dict]=None, **kwargs):\n \"\"\"Initializes a ParameterNoise Exploration object.\n\n Args:\n initial_stddev: The initial stddev to use for the noise.\n random_timesteps: The number of timesteps to act completely\n randomly (see [1]).\n sub_exploration: Optional sub-exploration config.\n None for auto-detection/setup.\n \"\"\"\n assert framework is not None\n super().__init__(action_space, policy_config=policy_config, model=\n model, framework=framework, **kwargs)\n self.stddev = get_variable(initial_stddev, framework=self.framework,\n tf_name='stddev')\n self.stddev_val = initial_stddev\n self.model_variables = [v for k, v in self.model.\n trainable_variables(as_dict=True).items() if 'LayerNorm' not in k]\n self.noise = []\n for var in self.model_variables:\n name_ = var.name.split(':')[0] + '_noisy' if var.name else ''\n self.noise.append(get_variable(np.zeros(var.shape, dtype=np.\n float32), framework=self.framework, tf_name=name_,\n torch_tensor=True, device=self.device))\n if self.framework == 'tf' and not tf.executing_eagerly():\n self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()\n self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()\n self.tf_remove_noise_op = self._tf_remove_noise_op()\n with tf1.control_dependencies([self.tf_sample_new_noise_op]):\n add_op = self._tf_add_stored_noise_op()\n with tf1.control_dependencies([add_op]):\n self.tf_sample_new_noise_and_add_op = tf.no_op()\n self.weights_are_currently_noisy = False\n if sub_exploration is None:\n if isinstance(self.action_space, Discrete):\n sub_exploration = {'type': 'EpsilonGreedy',\n 'epsilon_schedule': {'type': 'PiecewiseSchedule',\n 'endpoints': [(0, 1.0), (random_timesteps + 1, 1.0), (\n random_timesteps + 2, 
0.01)], 'outside_value': 0.01}}\n elif isinstance(self.action_space, Box):\n sub_exploration = {'type': 'OrnsteinUhlenbeckNoise',\n 'random_timesteps': random_timesteps}\n else:\n raise NotImplementedError\n self.sub_exploration = from_config(Exploration, sub_exploration,\n framework=self.framework, action_space=self.action_space,\n policy_config=self.policy_config, model=self.model, **kwargs)\n self.episode_started = False\n\n @override(Exploration)\n def before_compute_actions(self, *, timestep: Optional[int]=None,\n explore: Optional[bool]=None, tf_sess: Optional['tf.Session']=None):\n explore = explore if explore is not None else self.policy_config[\n 'explore']\n if self.episode_started:\n self._delayed_on_episode_start(explore, tf_sess)\n if explore and not self.weights_are_currently_noisy:\n self._add_stored_noise(tf_sess=tf_sess)\n elif not explore and self.weights_are_currently_noisy:\n self._remove_noise(tf_sess=tf_sess)\n\n @override(Exploration)\n def get_exploration_action(self, *, action_distribution:\n ActionDistribution, timestep: Union[TensorType, int], explore:\n Union[TensorType, bool]):\n return self.sub_exploration.get_exploration_action(action_distribution\n =action_distribution, timestep=timestep, explore=explore)\n\n @override(Exploration)\n def on_episode_start(self, policy: 'Policy', *, environment: BaseEnv=\n None, episode: int=None, tf_sess: Optional['tf.Session']=None):\n self.episode_started = True\n\n def _delayed_on_episode_start(self, explore, tf_sess):\n if explore:\n self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)\n else:\n self._sample_new_noise(tf_sess=tf_sess)\n self.episode_started = False\n\n @override(Exploration)\n def on_episode_end(self, policy, *, environment=None, episode=None,\n tf_sess=None):\n if self.weights_are_currently_noisy:\n self._remove_noise(tf_sess=tf_sess)\n\n @override(Exploration)\n def postprocess_trajectory(self, policy: 'Policy', sample_batch:\n SampleBatch, tf_sess: 
Optional['tf.Session']=None):\n noisy_action_dist = noise_free_action_dist = None\n _, _, fetches = policy.compute_actions_from_input_dict(input_dict=\n sample_batch, explore=self.weights_are_currently_noisy)\n if issubclass(policy.dist_class, (Categorical, TorchCategorical)):\n action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])\n elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)\n ):\n action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]\n else:\n raise NotImplementedError\n if self.weights_are_currently_noisy:\n noisy_action_dist = action_dist\n else:\n noise_free_action_dist = action_dist\n _, _, fetches = policy.compute_actions_from_input_dict(input_dict=\n sample_batch, explore=not self.weights_are_currently_noisy)\n if issubclass(policy.dist_class, (Categorical, TorchCategorical)):\n action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])\n elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)\n ):\n action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]\n if noisy_action_dist is None:\n noisy_action_dist = action_dist\n else:\n noise_free_action_dist = action_dist\n delta = distance = None\n if issubclass(policy.dist_class, (Categorical, TorchCategorical)):\n distance = np.nanmean(np.sum(noise_free_action_dist * np.log(\n noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)\n ), 1))\n current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[\n 'cur_epsilon']\n delta = -np.log(1 - current_epsilon + current_epsilon / self.\n action_space.n)\n elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)\n ):\n distance = np.sqrt(np.mean(np.square(noise_free_action_dist -\n noisy_action_dist)))\n current_scale = self.sub_exploration.get_state(sess=tf_sess)[\n 'cur_scale']\n delta = getattr(self.sub_exploration, 'ou_sigma', 0.2\n ) * current_scale\n if distance <= delta:\n self.stddev_val *= 1.01\n else:\n self.stddev_val /= 1.01\n self.set_state(self.get_state(), sess=tf_sess)\n 
return sample_batch\n\n def _sample_new_noise(self, *, tf_sess=None):\n \"\"\"Samples new noise and stores it in `self.noise`.\"\"\"\n if self.framework == 'tf':\n tf_sess.run(self.tf_sample_new_noise_op)\n elif self.framework == 'tf2':\n self._tf_sample_new_noise_op()\n else:\n for i in range(len(self.noise)):\n self.noise[i] = torch.normal(mean=torch.zeros(self.noise[i]\n .size()), std=self.stddev).to(self.device)\n\n def _tf_sample_new_noise_op(self):\n added_noises = []\n for noise in self.noise:\n added_noises.append(tf1.assign(noise, tf.random.normal(shape=\n noise.shape, stddev=self.stddev, dtype=tf.float32)))\n return tf.group(*added_noises)\n\n def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):\n if self.framework == 'tf':\n if override and self.weights_are_currently_noisy:\n tf_sess.run(self.tf_remove_noise_op)\n tf_sess.run(self.tf_sample_new_noise_and_add_op)\n else:\n if override and self.weights_are_currently_noisy:\n self._remove_noise()\n self._sample_new_noise()\n self._add_stored_noise()\n self.weights_are_currently_noisy = True\n\n def _add_stored_noise(self, *, tf_sess=None):\n \"\"\"Adds the stored `self.noise` to the model's parameters.\n\n Note: No new sampling of noise here.\n\n Args:\n tf_sess (Optional[tf.Session]): The tf-session to use to add the\n stored noise to the (currently noise-free) weights.\n override: If True, undo any currently applied noise first,\n then add the currently stored noise.\n \"\"\"\n assert self.weights_are_currently_noisy is False\n if self.framework == 'tf':\n tf_sess.run(self.tf_add_stored_noise_op)\n elif self.framework == 'tf2':\n self._tf_add_stored_noise_op()\n else:\n for var, noise in zip(self.model_variables, self.noise):\n var.requires_grad = False\n var.add_(noise)\n var.requires_grad = True\n self.weights_are_currently_noisy = True\n\n def _tf_add_stored_noise_op(self):\n \"\"\"Generates tf-op that assigns the stored noise to weights.\n\n Also used by tf-eager.\n\n Returns:\n 
tf.op: The tf op to apply the already stored noise to the NN.\n \"\"\"\n add_noise_ops = list()\n for var, noise in zip(self.model_variables, self.noise):\n add_noise_ops.append(tf1.assign_add(var, noise))\n ret = tf.group(*tuple(add_noise_ops))\n with tf1.control_dependencies([ret]):\n return tf.no_op()\n\n def _remove_noise(self, *, tf_sess=None):\n \"\"\"\n Removes the current action noise from the model parameters.\n\n Args:\n tf_sess (Optional[tf.Session]): The tf-session to use to remove\n the noise from the (currently noisy) weights.\n \"\"\"\n assert self.weights_are_currently_noisy is True\n if self.framework == 'tf':\n tf_sess.run(self.tf_remove_noise_op)\n elif self.framework == 'tf2':\n self._tf_remove_noise_op()\n else:\n for var, noise in zip(self.model_variables, self.noise):\n var.requires_grad = False\n var.add_(-noise)\n var.requires_grad = True\n self.weights_are_currently_noisy = False\n\n def _tf_remove_noise_op(self):\n \"\"\"Generates a tf-op for removing noise from the model's weights.\n\n Also used by tf-eager.\n\n Returns:\n tf.op: The tf op to remve the currently stored noise from the NN.\n \"\"\"\n remove_noise_ops = list()\n for var, noise in zip(self.model_variables, self.noise):\n remove_noise_ops.append(tf1.assign_add(var, -noise))\n ret = tf.group(*tuple(remove_noise_ops))\n with tf1.control_dependencies([ret]):\n return tf.no_op()\n\n @override(Exploration)\n def get_state(self, sess=None):\n return {'cur_stddev': self.stddev_val}\n\n @override(Exploration)\n def set_state(self, state: dict, sess: Optional['tf.Session']=None) ->None:\n self.stddev_val = state['cur_stddev']\n if self.framework == 'tf':\n self.stddev.load(self.stddev_val, session=sess)\n elif isinstance(self.stddev, float):\n self.stddev = self.stddev_val\n else:\n self.stddev.assign(self.stddev_val)\n",
"step-3": "<mask token>\nif TYPE_CHECKING:\n from ray.rllib.policy.policy import Policy\ntf1, tf, tfv = try_import_tf()\ntorch, _ = try_import_torch()\n\n\n@PublicAPI\nclass ParameterNoise(Exploration):\n \"\"\"An exploration that changes a Model's parameters.\n\n Implemented based on:\n [1] https://openai.com/research/better-exploration-with-parameter-noise\n [2] https://arxiv.org/pdf/1706.01905.pdf\n\n At the beginning of an episode, Gaussian noise is added to all weights\n of the model. At the end of the episode, the noise is undone and an action\n diff (pi-delta) is calculated, from which we determine the changes in the\n noise's stddev for the next episode.\n \"\"\"\n\n def __init__(self, action_space, *, framework: str, policy_config: dict,\n model: ModelV2, initial_stddev: float=1.0, random_timesteps: int=\n 10000, sub_exploration: Optional[dict]=None, **kwargs):\n \"\"\"Initializes a ParameterNoise Exploration object.\n\n Args:\n initial_stddev: The initial stddev to use for the noise.\n random_timesteps: The number of timesteps to act completely\n randomly (see [1]).\n sub_exploration: Optional sub-exploration config.\n None for auto-detection/setup.\n \"\"\"\n assert framework is not None\n super().__init__(action_space, policy_config=policy_config, model=\n model, framework=framework, **kwargs)\n self.stddev = get_variable(initial_stddev, framework=self.framework,\n tf_name='stddev')\n self.stddev_val = initial_stddev\n self.model_variables = [v for k, v in self.model.\n trainable_variables(as_dict=True).items() if 'LayerNorm' not in k]\n self.noise = []\n for var in self.model_variables:\n name_ = var.name.split(':')[0] + '_noisy' if var.name else ''\n self.noise.append(get_variable(np.zeros(var.shape, dtype=np.\n float32), framework=self.framework, tf_name=name_,\n torch_tensor=True, device=self.device))\n if self.framework == 'tf' and not tf.executing_eagerly():\n self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()\n 
self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()\n self.tf_remove_noise_op = self._tf_remove_noise_op()\n with tf1.control_dependencies([self.tf_sample_new_noise_op]):\n add_op = self._tf_add_stored_noise_op()\n with tf1.control_dependencies([add_op]):\n self.tf_sample_new_noise_and_add_op = tf.no_op()\n self.weights_are_currently_noisy = False\n if sub_exploration is None:\n if isinstance(self.action_space, Discrete):\n sub_exploration = {'type': 'EpsilonGreedy',\n 'epsilon_schedule': {'type': 'PiecewiseSchedule',\n 'endpoints': [(0, 1.0), (random_timesteps + 1, 1.0), (\n random_timesteps + 2, 0.01)], 'outside_value': 0.01}}\n elif isinstance(self.action_space, Box):\n sub_exploration = {'type': 'OrnsteinUhlenbeckNoise',\n 'random_timesteps': random_timesteps}\n else:\n raise NotImplementedError\n self.sub_exploration = from_config(Exploration, sub_exploration,\n framework=self.framework, action_space=self.action_space,\n policy_config=self.policy_config, model=self.model, **kwargs)\n self.episode_started = False\n\n @override(Exploration)\n def before_compute_actions(self, *, timestep: Optional[int]=None,\n explore: Optional[bool]=None, tf_sess: Optional['tf.Session']=None):\n explore = explore if explore is not None else self.policy_config[\n 'explore']\n if self.episode_started:\n self._delayed_on_episode_start(explore, tf_sess)\n if explore and not self.weights_are_currently_noisy:\n self._add_stored_noise(tf_sess=tf_sess)\n elif not explore and self.weights_are_currently_noisy:\n self._remove_noise(tf_sess=tf_sess)\n\n @override(Exploration)\n def get_exploration_action(self, *, action_distribution:\n ActionDistribution, timestep: Union[TensorType, int], explore:\n Union[TensorType, bool]):\n return self.sub_exploration.get_exploration_action(action_distribution\n =action_distribution, timestep=timestep, explore=explore)\n\n @override(Exploration)\n def on_episode_start(self, policy: 'Policy', *, environment: BaseEnv=\n None, episode: int=None, 
tf_sess: Optional['tf.Session']=None):\n self.episode_started = True\n\n def _delayed_on_episode_start(self, explore, tf_sess):\n if explore:\n self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)\n else:\n self._sample_new_noise(tf_sess=tf_sess)\n self.episode_started = False\n\n @override(Exploration)\n def on_episode_end(self, policy, *, environment=None, episode=None,\n tf_sess=None):\n if self.weights_are_currently_noisy:\n self._remove_noise(tf_sess=tf_sess)\n\n @override(Exploration)\n def postprocess_trajectory(self, policy: 'Policy', sample_batch:\n SampleBatch, tf_sess: Optional['tf.Session']=None):\n noisy_action_dist = noise_free_action_dist = None\n _, _, fetches = policy.compute_actions_from_input_dict(input_dict=\n sample_batch, explore=self.weights_are_currently_noisy)\n if issubclass(policy.dist_class, (Categorical, TorchCategorical)):\n action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])\n elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)\n ):\n action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]\n else:\n raise NotImplementedError\n if self.weights_are_currently_noisy:\n noisy_action_dist = action_dist\n else:\n noise_free_action_dist = action_dist\n _, _, fetches = policy.compute_actions_from_input_dict(input_dict=\n sample_batch, explore=not self.weights_are_currently_noisy)\n if issubclass(policy.dist_class, (Categorical, TorchCategorical)):\n action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])\n elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)\n ):\n action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]\n if noisy_action_dist is None:\n noisy_action_dist = action_dist\n else:\n noise_free_action_dist = action_dist\n delta = distance = None\n if issubclass(policy.dist_class, (Categorical, TorchCategorical)):\n distance = np.nanmean(np.sum(noise_free_action_dist * np.log(\n noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)\n ), 1))\n current_epsilon = 
self.sub_exploration.get_state(sess=tf_sess)[\n 'cur_epsilon']\n delta = -np.log(1 - current_epsilon + current_epsilon / self.\n action_space.n)\n elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)\n ):\n distance = np.sqrt(np.mean(np.square(noise_free_action_dist -\n noisy_action_dist)))\n current_scale = self.sub_exploration.get_state(sess=tf_sess)[\n 'cur_scale']\n delta = getattr(self.sub_exploration, 'ou_sigma', 0.2\n ) * current_scale\n if distance <= delta:\n self.stddev_val *= 1.01\n else:\n self.stddev_val /= 1.01\n self.set_state(self.get_state(), sess=tf_sess)\n return sample_batch\n\n def _sample_new_noise(self, *, tf_sess=None):\n \"\"\"Samples new noise and stores it in `self.noise`.\"\"\"\n if self.framework == 'tf':\n tf_sess.run(self.tf_sample_new_noise_op)\n elif self.framework == 'tf2':\n self._tf_sample_new_noise_op()\n else:\n for i in range(len(self.noise)):\n self.noise[i] = torch.normal(mean=torch.zeros(self.noise[i]\n .size()), std=self.stddev).to(self.device)\n\n def _tf_sample_new_noise_op(self):\n added_noises = []\n for noise in self.noise:\n added_noises.append(tf1.assign(noise, tf.random.normal(shape=\n noise.shape, stddev=self.stddev, dtype=tf.float32)))\n return tf.group(*added_noises)\n\n def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):\n if self.framework == 'tf':\n if override and self.weights_are_currently_noisy:\n tf_sess.run(self.tf_remove_noise_op)\n tf_sess.run(self.tf_sample_new_noise_and_add_op)\n else:\n if override and self.weights_are_currently_noisy:\n self._remove_noise()\n self._sample_new_noise()\n self._add_stored_noise()\n self.weights_are_currently_noisy = True\n\n def _add_stored_noise(self, *, tf_sess=None):\n \"\"\"Adds the stored `self.noise` to the model's parameters.\n\n Note: No new sampling of noise here.\n\n Args:\n tf_sess (Optional[tf.Session]): The tf-session to use to add the\n stored noise to the (currently noise-free) weights.\n override: If True, undo any 
currently applied noise first,\n then add the currently stored noise.\n \"\"\"\n assert self.weights_are_currently_noisy is False\n if self.framework == 'tf':\n tf_sess.run(self.tf_add_stored_noise_op)\n elif self.framework == 'tf2':\n self._tf_add_stored_noise_op()\n else:\n for var, noise in zip(self.model_variables, self.noise):\n var.requires_grad = False\n var.add_(noise)\n var.requires_grad = True\n self.weights_are_currently_noisy = True\n\n def _tf_add_stored_noise_op(self):\n \"\"\"Generates tf-op that assigns the stored noise to weights.\n\n Also used by tf-eager.\n\n Returns:\n tf.op: The tf op to apply the already stored noise to the NN.\n \"\"\"\n add_noise_ops = list()\n for var, noise in zip(self.model_variables, self.noise):\n add_noise_ops.append(tf1.assign_add(var, noise))\n ret = tf.group(*tuple(add_noise_ops))\n with tf1.control_dependencies([ret]):\n return tf.no_op()\n\n def _remove_noise(self, *, tf_sess=None):\n \"\"\"\n Removes the current action noise from the model parameters.\n\n Args:\n tf_sess (Optional[tf.Session]): The tf-session to use to remove\n the noise from the (currently noisy) weights.\n \"\"\"\n assert self.weights_are_currently_noisy is True\n if self.framework == 'tf':\n tf_sess.run(self.tf_remove_noise_op)\n elif self.framework == 'tf2':\n self._tf_remove_noise_op()\n else:\n for var, noise in zip(self.model_variables, self.noise):\n var.requires_grad = False\n var.add_(-noise)\n var.requires_grad = True\n self.weights_are_currently_noisy = False\n\n def _tf_remove_noise_op(self):\n \"\"\"Generates a tf-op for removing noise from the model's weights.\n\n Also used by tf-eager.\n\n Returns:\n tf.op: The tf op to remve the currently stored noise from the NN.\n \"\"\"\n remove_noise_ops = list()\n for var, noise in zip(self.model_variables, self.noise):\n remove_noise_ops.append(tf1.assign_add(var, -noise))\n ret = tf.group(*tuple(remove_noise_ops))\n with tf1.control_dependencies([ret]):\n return tf.no_op()\n\n 
@override(Exploration)\n def get_state(self, sess=None):\n return {'cur_stddev': self.stddev_val}\n\n @override(Exploration)\n def set_state(self, state: dict, sess: Optional['tf.Session']=None) ->None:\n self.stddev_val = state['cur_stddev']\n if self.framework == 'tf':\n self.stddev.load(self.stddev_val, session=sess)\n elif isinstance(self.stddev, float):\n self.stddev = self.stddev_val\n else:\n self.stddev.assign(self.stddev_val)\n",
"step-4": "from gymnasium.spaces import Box, Discrete\nimport numpy as np\nfrom typing import Optional, TYPE_CHECKING, Union\nfrom ray.rllib.env.base_env import BaseEnv\nfrom ray.rllib.models.action_dist import ActionDistribution\nfrom ray.rllib.models.modelv2 import ModelV2\nfrom ray.rllib.models.tf.tf_action_dist import Categorical, Deterministic\nfrom ray.rllib.models.torch.torch_action_dist import TorchCategorical, TorchDeterministic\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.utils.annotations import override, PublicAPI\nfrom ray.rllib.utils.exploration.exploration import Exploration\nfrom ray.rllib.utils.framework import get_variable, try_import_tf, try_import_torch\nfrom ray.rllib.utils.from_config import from_config\nfrom ray.rllib.utils.numpy import softmax, SMALL_NUMBER\nfrom ray.rllib.utils.typing import TensorType\nif TYPE_CHECKING:\n from ray.rllib.policy.policy import Policy\ntf1, tf, tfv = try_import_tf()\ntorch, _ = try_import_torch()\n\n\n@PublicAPI\nclass ParameterNoise(Exploration):\n \"\"\"An exploration that changes a Model's parameters.\n\n Implemented based on:\n [1] https://openai.com/research/better-exploration-with-parameter-noise\n [2] https://arxiv.org/pdf/1706.01905.pdf\n\n At the beginning of an episode, Gaussian noise is added to all weights\n of the model. 
At the end of the episode, the noise is undone and an action\n diff (pi-delta) is calculated, from which we determine the changes in the\n noise's stddev for the next episode.\n \"\"\"\n\n def __init__(self, action_space, *, framework: str, policy_config: dict,\n model: ModelV2, initial_stddev: float=1.0, random_timesteps: int=\n 10000, sub_exploration: Optional[dict]=None, **kwargs):\n \"\"\"Initializes a ParameterNoise Exploration object.\n\n Args:\n initial_stddev: The initial stddev to use for the noise.\n random_timesteps: The number of timesteps to act completely\n randomly (see [1]).\n sub_exploration: Optional sub-exploration config.\n None for auto-detection/setup.\n \"\"\"\n assert framework is not None\n super().__init__(action_space, policy_config=policy_config, model=\n model, framework=framework, **kwargs)\n self.stddev = get_variable(initial_stddev, framework=self.framework,\n tf_name='stddev')\n self.stddev_val = initial_stddev\n self.model_variables = [v for k, v in self.model.\n trainable_variables(as_dict=True).items() if 'LayerNorm' not in k]\n self.noise = []\n for var in self.model_variables:\n name_ = var.name.split(':')[0] + '_noisy' if var.name else ''\n self.noise.append(get_variable(np.zeros(var.shape, dtype=np.\n float32), framework=self.framework, tf_name=name_,\n torch_tensor=True, device=self.device))\n if self.framework == 'tf' and not tf.executing_eagerly():\n self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()\n self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()\n self.tf_remove_noise_op = self._tf_remove_noise_op()\n with tf1.control_dependencies([self.tf_sample_new_noise_op]):\n add_op = self._tf_add_stored_noise_op()\n with tf1.control_dependencies([add_op]):\n self.tf_sample_new_noise_and_add_op = tf.no_op()\n self.weights_are_currently_noisy = False\n if sub_exploration is None:\n if isinstance(self.action_space, Discrete):\n sub_exploration = {'type': 'EpsilonGreedy',\n 'epsilon_schedule': {'type': 
'PiecewiseSchedule',\n 'endpoints': [(0, 1.0), (random_timesteps + 1, 1.0), (\n random_timesteps + 2, 0.01)], 'outside_value': 0.01}}\n elif isinstance(self.action_space, Box):\n sub_exploration = {'type': 'OrnsteinUhlenbeckNoise',\n 'random_timesteps': random_timesteps}\n else:\n raise NotImplementedError\n self.sub_exploration = from_config(Exploration, sub_exploration,\n framework=self.framework, action_space=self.action_space,\n policy_config=self.policy_config, model=self.model, **kwargs)\n self.episode_started = False\n\n @override(Exploration)\n def before_compute_actions(self, *, timestep: Optional[int]=None,\n explore: Optional[bool]=None, tf_sess: Optional['tf.Session']=None):\n explore = explore if explore is not None else self.policy_config[\n 'explore']\n if self.episode_started:\n self._delayed_on_episode_start(explore, tf_sess)\n if explore and not self.weights_are_currently_noisy:\n self._add_stored_noise(tf_sess=tf_sess)\n elif not explore and self.weights_are_currently_noisy:\n self._remove_noise(tf_sess=tf_sess)\n\n @override(Exploration)\n def get_exploration_action(self, *, action_distribution:\n ActionDistribution, timestep: Union[TensorType, int], explore:\n Union[TensorType, bool]):\n return self.sub_exploration.get_exploration_action(action_distribution\n =action_distribution, timestep=timestep, explore=explore)\n\n @override(Exploration)\n def on_episode_start(self, policy: 'Policy', *, environment: BaseEnv=\n None, episode: int=None, tf_sess: Optional['tf.Session']=None):\n self.episode_started = True\n\n def _delayed_on_episode_start(self, explore, tf_sess):\n if explore:\n self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)\n else:\n self._sample_new_noise(tf_sess=tf_sess)\n self.episode_started = False\n\n @override(Exploration)\n def on_episode_end(self, policy, *, environment=None, episode=None,\n tf_sess=None):\n if self.weights_are_currently_noisy:\n self._remove_noise(tf_sess=tf_sess)\n\n @override(Exploration)\n def 
postprocess_trajectory(self, policy: 'Policy', sample_batch:\n SampleBatch, tf_sess: Optional['tf.Session']=None):\n noisy_action_dist = noise_free_action_dist = None\n _, _, fetches = policy.compute_actions_from_input_dict(input_dict=\n sample_batch, explore=self.weights_are_currently_noisy)\n if issubclass(policy.dist_class, (Categorical, TorchCategorical)):\n action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])\n elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)\n ):\n action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]\n else:\n raise NotImplementedError\n if self.weights_are_currently_noisy:\n noisy_action_dist = action_dist\n else:\n noise_free_action_dist = action_dist\n _, _, fetches = policy.compute_actions_from_input_dict(input_dict=\n sample_batch, explore=not self.weights_are_currently_noisy)\n if issubclass(policy.dist_class, (Categorical, TorchCategorical)):\n action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])\n elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)\n ):\n action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]\n if noisy_action_dist is None:\n noisy_action_dist = action_dist\n else:\n noise_free_action_dist = action_dist\n delta = distance = None\n if issubclass(policy.dist_class, (Categorical, TorchCategorical)):\n distance = np.nanmean(np.sum(noise_free_action_dist * np.log(\n noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)\n ), 1))\n current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[\n 'cur_epsilon']\n delta = -np.log(1 - current_epsilon + current_epsilon / self.\n action_space.n)\n elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)\n ):\n distance = np.sqrt(np.mean(np.square(noise_free_action_dist -\n noisy_action_dist)))\n current_scale = self.sub_exploration.get_state(sess=tf_sess)[\n 'cur_scale']\n delta = getattr(self.sub_exploration, 'ou_sigma', 0.2\n ) * current_scale\n if distance <= delta:\n self.stddev_val *= 1.01\n 
else:\n self.stddev_val /= 1.01\n self.set_state(self.get_state(), sess=tf_sess)\n return sample_batch\n\n def _sample_new_noise(self, *, tf_sess=None):\n \"\"\"Samples new noise and stores it in `self.noise`.\"\"\"\n if self.framework == 'tf':\n tf_sess.run(self.tf_sample_new_noise_op)\n elif self.framework == 'tf2':\n self._tf_sample_new_noise_op()\n else:\n for i in range(len(self.noise)):\n self.noise[i] = torch.normal(mean=torch.zeros(self.noise[i]\n .size()), std=self.stddev).to(self.device)\n\n def _tf_sample_new_noise_op(self):\n added_noises = []\n for noise in self.noise:\n added_noises.append(tf1.assign(noise, tf.random.normal(shape=\n noise.shape, stddev=self.stddev, dtype=tf.float32)))\n return tf.group(*added_noises)\n\n def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):\n if self.framework == 'tf':\n if override and self.weights_are_currently_noisy:\n tf_sess.run(self.tf_remove_noise_op)\n tf_sess.run(self.tf_sample_new_noise_and_add_op)\n else:\n if override and self.weights_are_currently_noisy:\n self._remove_noise()\n self._sample_new_noise()\n self._add_stored_noise()\n self.weights_are_currently_noisy = True\n\n def _add_stored_noise(self, *, tf_sess=None):\n \"\"\"Adds the stored `self.noise` to the model's parameters.\n\n Note: No new sampling of noise here.\n\n Args:\n tf_sess (Optional[tf.Session]): The tf-session to use to add the\n stored noise to the (currently noise-free) weights.\n override: If True, undo any currently applied noise first,\n then add the currently stored noise.\n \"\"\"\n assert self.weights_are_currently_noisy is False\n if self.framework == 'tf':\n tf_sess.run(self.tf_add_stored_noise_op)\n elif self.framework == 'tf2':\n self._tf_add_stored_noise_op()\n else:\n for var, noise in zip(self.model_variables, self.noise):\n var.requires_grad = False\n var.add_(noise)\n var.requires_grad = True\n self.weights_are_currently_noisy = True\n\n def _tf_add_stored_noise_op(self):\n \"\"\"Generates tf-op that 
assigns the stored noise to weights.\n\n Also used by tf-eager.\n\n Returns:\n tf.op: The tf op to apply the already stored noise to the NN.\n \"\"\"\n add_noise_ops = list()\n for var, noise in zip(self.model_variables, self.noise):\n add_noise_ops.append(tf1.assign_add(var, noise))\n ret = tf.group(*tuple(add_noise_ops))\n with tf1.control_dependencies([ret]):\n return tf.no_op()\n\n def _remove_noise(self, *, tf_sess=None):\n \"\"\"\n Removes the current action noise from the model parameters.\n\n Args:\n tf_sess (Optional[tf.Session]): The tf-session to use to remove\n the noise from the (currently noisy) weights.\n \"\"\"\n assert self.weights_are_currently_noisy is True\n if self.framework == 'tf':\n tf_sess.run(self.tf_remove_noise_op)\n elif self.framework == 'tf2':\n self._tf_remove_noise_op()\n else:\n for var, noise in zip(self.model_variables, self.noise):\n var.requires_grad = False\n var.add_(-noise)\n var.requires_grad = True\n self.weights_are_currently_noisy = False\n\n def _tf_remove_noise_op(self):\n \"\"\"Generates a tf-op for removing noise from the model's weights.\n\n Also used by tf-eager.\n\n Returns:\n tf.op: The tf op to remve the currently stored noise from the NN.\n \"\"\"\n remove_noise_ops = list()\n for var, noise in zip(self.model_variables, self.noise):\n remove_noise_ops.append(tf1.assign_add(var, -noise))\n ret = tf.group(*tuple(remove_noise_ops))\n with tf1.control_dependencies([ret]):\n return tf.no_op()\n\n @override(Exploration)\n def get_state(self, sess=None):\n return {'cur_stddev': self.stddev_val}\n\n @override(Exploration)\n def set_state(self, state: dict, sess: Optional['tf.Session']=None) ->None:\n self.stddev_val = state['cur_stddev']\n if self.framework == 'tf':\n self.stddev.load(self.stddev_val, session=sess)\n elif isinstance(self.stddev, float):\n self.stddev = self.stddev_val\n else:\n self.stddev.assign(self.stddev_val)\n",
"step-5": "from gymnasium.spaces import Box, Discrete\nimport numpy as np\nfrom typing import Optional, TYPE_CHECKING, Union\n\nfrom ray.rllib.env.base_env import BaseEnv\nfrom ray.rllib.models.action_dist import ActionDistribution\nfrom ray.rllib.models.modelv2 import ModelV2\nfrom ray.rllib.models.tf.tf_action_dist import Categorical, Deterministic\nfrom ray.rllib.models.torch.torch_action_dist import (\n TorchCategorical,\n TorchDeterministic,\n)\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.utils.annotations import override, PublicAPI\nfrom ray.rllib.utils.exploration.exploration import Exploration\nfrom ray.rllib.utils.framework import get_variable, try_import_tf, try_import_torch\nfrom ray.rllib.utils.from_config import from_config\nfrom ray.rllib.utils.numpy import softmax, SMALL_NUMBER\nfrom ray.rllib.utils.typing import TensorType\n\nif TYPE_CHECKING:\n from ray.rllib.policy.policy import Policy\n\ntf1, tf, tfv = try_import_tf()\ntorch, _ = try_import_torch()\n\n\n@PublicAPI\nclass ParameterNoise(Exploration):\n \"\"\"An exploration that changes a Model's parameters.\n\n Implemented based on:\n [1] https://openai.com/research/better-exploration-with-parameter-noise\n [2] https://arxiv.org/pdf/1706.01905.pdf\n\n At the beginning of an episode, Gaussian noise is added to all weights\n of the model. 
At the end of the episode, the noise is undone and an action\n diff (pi-delta) is calculated, from which we determine the changes in the\n noise's stddev for the next episode.\n \"\"\"\n\n def __init__(\n self,\n action_space,\n *,\n framework: str,\n policy_config: dict,\n model: ModelV2,\n initial_stddev: float = 1.0,\n random_timesteps: int = 10000,\n sub_exploration: Optional[dict] = None,\n **kwargs\n ):\n \"\"\"Initializes a ParameterNoise Exploration object.\n\n Args:\n initial_stddev: The initial stddev to use for the noise.\n random_timesteps: The number of timesteps to act completely\n randomly (see [1]).\n sub_exploration: Optional sub-exploration config.\n None for auto-detection/setup.\n \"\"\"\n assert framework is not None\n super().__init__(\n action_space,\n policy_config=policy_config,\n model=model,\n framework=framework,\n **kwargs\n )\n\n self.stddev = get_variable(\n initial_stddev, framework=self.framework, tf_name=\"stddev\"\n )\n self.stddev_val = initial_stddev # Out-of-graph tf value holder.\n\n # The weight variables of the Model where noise should be applied to.\n # This excludes any variable, whose name contains \"LayerNorm\" (those\n # are BatchNormalization layers, which should not be perturbed).\n self.model_variables = [\n v\n for k, v in self.model.trainable_variables(as_dict=True).items()\n if \"LayerNorm\" not in k\n ]\n # Our noise to be added to the weights. 
Each item in `self.noise`\n # corresponds to one Model variable and holding the Gaussian noise to\n # be added to that variable (weight).\n self.noise = []\n for var in self.model_variables:\n name_ = var.name.split(\":\")[0] + \"_noisy\" if var.name else \"\"\n self.noise.append(\n get_variable(\n np.zeros(var.shape, dtype=np.float32),\n framework=self.framework,\n tf_name=name_,\n torch_tensor=True,\n device=self.device,\n )\n )\n\n # tf-specific ops to sample, assign and remove noise.\n if self.framework == \"tf\" and not tf.executing_eagerly():\n self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()\n self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()\n self.tf_remove_noise_op = self._tf_remove_noise_op()\n # Create convenience sample+add op for tf.\n with tf1.control_dependencies([self.tf_sample_new_noise_op]):\n add_op = self._tf_add_stored_noise_op()\n with tf1.control_dependencies([add_op]):\n self.tf_sample_new_noise_and_add_op = tf.no_op()\n\n # Whether the Model's weights currently have noise added or not.\n self.weights_are_currently_noisy = False\n\n # Auto-detection of underlying exploration functionality.\n if sub_exploration is None:\n # For discrete action spaces, use an underlying EpsilonGreedy with\n # a special schedule.\n if isinstance(self.action_space, Discrete):\n sub_exploration = {\n \"type\": \"EpsilonGreedy\",\n \"epsilon_schedule\": {\n \"type\": \"PiecewiseSchedule\",\n # Step function (see [2]).\n \"endpoints\": [\n (0, 1.0),\n (random_timesteps + 1, 1.0),\n (random_timesteps + 2, 0.01),\n ],\n \"outside_value\": 0.01,\n },\n }\n elif isinstance(self.action_space, Box):\n sub_exploration = {\n \"type\": \"OrnsteinUhlenbeckNoise\",\n \"random_timesteps\": random_timesteps,\n }\n # TODO(sven): Implement for any action space.\n else:\n raise NotImplementedError\n\n self.sub_exploration = from_config(\n Exploration,\n sub_exploration,\n framework=self.framework,\n action_space=self.action_space,\n 
policy_config=self.policy_config,\n model=self.model,\n **kwargs\n )\n\n # Whether we need to call `self._delayed_on_episode_start` before\n # the forward pass.\n self.episode_started = False\n\n @override(Exploration)\n def before_compute_actions(\n self,\n *,\n timestep: Optional[int] = None,\n explore: Optional[bool] = None,\n tf_sess: Optional[\"tf.Session\"] = None\n ):\n explore = explore if explore is not None else self.policy_config[\"explore\"]\n\n # Is this the first forward pass in the new episode? If yes, do the\n # noise re-sampling and add to weights.\n if self.episode_started:\n self._delayed_on_episode_start(explore, tf_sess)\n\n # Add noise if necessary.\n if explore and not self.weights_are_currently_noisy:\n self._add_stored_noise(tf_sess=tf_sess)\n # Remove noise if necessary.\n elif not explore and self.weights_are_currently_noisy:\n self._remove_noise(tf_sess=tf_sess)\n\n @override(Exploration)\n def get_exploration_action(\n self,\n *,\n action_distribution: ActionDistribution,\n timestep: Union[TensorType, int],\n explore: Union[TensorType, bool]\n ):\n # Use our sub-exploration object to handle the final exploration\n # action (depends on the algo-type/action-space/etc..).\n return self.sub_exploration.get_exploration_action(\n action_distribution=action_distribution, timestep=timestep, explore=explore\n )\n\n @override(Exploration)\n def on_episode_start(\n self,\n policy: \"Policy\",\n *,\n environment: BaseEnv = None,\n episode: int = None,\n tf_sess: Optional[\"tf.Session\"] = None\n ):\n # We have to delay the noise-adding step by one forward call.\n # This is due to the fact that the optimizer does it's step right\n # after the episode was reset (and hence the noise was already added!).\n # We don't want to update into a noisy net.\n self.episode_started = True\n\n def _delayed_on_episode_start(self, explore, tf_sess):\n # Sample fresh noise and add to weights.\n if explore:\n self._sample_new_noise_and_add(tf_sess=tf_sess, 
override=True)\n # Only sample, don't apply anything to the weights.\n else:\n self._sample_new_noise(tf_sess=tf_sess)\n self.episode_started = False\n\n @override(Exploration)\n def on_episode_end(self, policy, *, environment=None, episode=None, tf_sess=None):\n # Remove stored noise from weights (only if currently noisy).\n if self.weights_are_currently_noisy:\n self._remove_noise(tf_sess=tf_sess)\n\n @override(Exploration)\n def postprocess_trajectory(\n self,\n policy: \"Policy\",\n sample_batch: SampleBatch,\n tf_sess: Optional[\"tf.Session\"] = None,\n ):\n noisy_action_dist = noise_free_action_dist = None\n # Adjust the stddev depending on the action (pi)-distance.\n # Also see [1] for details.\n # TODO(sven): Find out whether this can be scrapped by simply using\n # the `sample_batch` to get the noisy/noise-free action dist.\n _, _, fetches = policy.compute_actions_from_input_dict(\n input_dict=sample_batch, explore=self.weights_are_currently_noisy\n )\n\n # Categorical case (e.g. DQN).\n if issubclass(policy.dist_class, (Categorical, TorchCategorical)):\n action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])\n # Deterministic (Gaussian actions, e.g. DDPG).\n elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):\n action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]\n else:\n raise NotImplementedError # TODO(sven): Other action-dist cases.\n\n if self.weights_are_currently_noisy:\n noisy_action_dist = action_dist\n else:\n noise_free_action_dist = action_dist\n\n _, _, fetches = policy.compute_actions_from_input_dict(\n input_dict=sample_batch, explore=not self.weights_are_currently_noisy\n )\n\n # Categorical case (e.g. DQN).\n if issubclass(policy.dist_class, (Categorical, TorchCategorical)):\n action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])\n # Deterministic (Gaussian actions, e.g. 
DDPG).\n elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):\n action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]\n\n if noisy_action_dist is None:\n noisy_action_dist = action_dist\n else:\n noise_free_action_dist = action_dist\n\n delta = distance = None\n # Categorical case (e.g. DQN).\n if issubclass(policy.dist_class, (Categorical, TorchCategorical)):\n # Calculate KL-divergence (DKL(clean||noisy)) according to [2].\n # TODO(sven): Allow KL-divergence to be calculated by our\n # Distribution classes (don't support off-graph/numpy yet).\n distance = np.nanmean(\n np.sum(\n noise_free_action_dist\n * np.log(\n noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)\n ),\n 1,\n )\n )\n current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[\n \"cur_epsilon\"\n ]\n delta = -np.log(1 - current_epsilon + current_epsilon / self.action_space.n)\n elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):\n # Calculate MSE between noisy and non-noisy output (see [2]).\n distance = np.sqrt(\n np.mean(np.square(noise_free_action_dist - noisy_action_dist))\n )\n current_scale = self.sub_exploration.get_state(sess=tf_sess)[\"cur_scale\"]\n delta = getattr(self.sub_exploration, \"ou_sigma\", 0.2) * current_scale\n\n # Adjust stddev according to the calculated action-distance.\n if distance <= delta:\n self.stddev_val *= 1.01\n else:\n self.stddev_val /= 1.01\n\n # Update our state (self.stddev and self.stddev_val).\n self.set_state(self.get_state(), sess=tf_sess)\n\n return sample_batch\n\n def _sample_new_noise(self, *, tf_sess=None):\n \"\"\"Samples new noise and stores it in `self.noise`.\"\"\"\n if self.framework == \"tf\":\n tf_sess.run(self.tf_sample_new_noise_op)\n elif self.framework == \"tf2\":\n self._tf_sample_new_noise_op()\n else:\n for i in range(len(self.noise)):\n self.noise[i] = torch.normal(\n mean=torch.zeros(self.noise[i].size()), std=self.stddev\n ).to(self.device)\n\n def 
_tf_sample_new_noise_op(self):\n added_noises = []\n for noise in self.noise:\n added_noises.append(\n tf1.assign(\n noise,\n tf.random.normal(\n shape=noise.shape, stddev=self.stddev, dtype=tf.float32\n ),\n )\n )\n return tf.group(*added_noises)\n\n def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):\n if self.framework == \"tf\":\n if override and self.weights_are_currently_noisy:\n tf_sess.run(self.tf_remove_noise_op)\n tf_sess.run(self.tf_sample_new_noise_and_add_op)\n else:\n if override and self.weights_are_currently_noisy:\n self._remove_noise()\n self._sample_new_noise()\n self._add_stored_noise()\n\n self.weights_are_currently_noisy = True\n\n def _add_stored_noise(self, *, tf_sess=None):\n \"\"\"Adds the stored `self.noise` to the model's parameters.\n\n Note: No new sampling of noise here.\n\n Args:\n tf_sess (Optional[tf.Session]): The tf-session to use to add the\n stored noise to the (currently noise-free) weights.\n override: If True, undo any currently applied noise first,\n then add the currently stored noise.\n \"\"\"\n # Make sure we only add noise to currently noise-free weights.\n assert self.weights_are_currently_noisy is False\n\n # Add stored noise to the model's parameters.\n if self.framework == \"tf\":\n tf_sess.run(self.tf_add_stored_noise_op)\n elif self.framework == \"tf2\":\n self._tf_add_stored_noise_op()\n else:\n for var, noise in zip(self.model_variables, self.noise):\n # Add noise to weights in-place.\n var.requires_grad = False\n var.add_(noise)\n var.requires_grad = True\n\n self.weights_are_currently_noisy = True\n\n def _tf_add_stored_noise_op(self):\n \"\"\"Generates tf-op that assigns the stored noise to weights.\n\n Also used by tf-eager.\n\n Returns:\n tf.op: The tf op to apply the already stored noise to the NN.\n \"\"\"\n add_noise_ops = list()\n for var, noise in zip(self.model_variables, self.noise):\n add_noise_ops.append(tf1.assign_add(var, noise))\n ret = tf.group(*tuple(add_noise_ops))\n with 
tf1.control_dependencies([ret]):\n return tf.no_op()\n\n def _remove_noise(self, *, tf_sess=None):\n \"\"\"\n Removes the current action noise from the model parameters.\n\n Args:\n tf_sess (Optional[tf.Session]): The tf-session to use to remove\n the noise from the (currently noisy) weights.\n \"\"\"\n # Make sure we only remove noise iff currently noisy.\n assert self.weights_are_currently_noisy is True\n\n # Removes the stored noise from the model's parameters.\n if self.framework == \"tf\":\n tf_sess.run(self.tf_remove_noise_op)\n elif self.framework == \"tf2\":\n self._tf_remove_noise_op()\n else:\n for var, noise in zip(self.model_variables, self.noise):\n # Remove noise from weights in-place.\n var.requires_grad = False\n var.add_(-noise)\n var.requires_grad = True\n\n self.weights_are_currently_noisy = False\n\n def _tf_remove_noise_op(self):\n \"\"\"Generates a tf-op for removing noise from the model's weights.\n\n Also used by tf-eager.\n\n Returns:\n tf.op: The tf op to remve the currently stored noise from the NN.\n \"\"\"\n remove_noise_ops = list()\n for var, noise in zip(self.model_variables, self.noise):\n remove_noise_ops.append(tf1.assign_add(var, -noise))\n ret = tf.group(*tuple(remove_noise_ops))\n with tf1.control_dependencies([ret]):\n return tf.no_op()\n\n @override(Exploration)\n def get_state(self, sess=None):\n return {\"cur_stddev\": self.stddev_val}\n\n @override(Exploration)\n def set_state(self, state: dict, sess: Optional[\"tf.Session\"] = None) -> None:\n self.stddev_val = state[\"cur_stddev\"]\n # Set self.stddev to calculated value.\n if self.framework == \"tf\":\n self.stddev.load(self.stddev_val, session=sess)\n elif isinstance(self.stddev, float):\n self.stddev = self.stddev_val\n else:\n self.stddev.assign(self.stddev_val)\n",
"step-ids": [
16,
17,
20,
21,
22
]
}
|
[
16,
17,
20,
21,
22
] |
<|reserved_special_token_0|>
def generate_str():
print(','.join(str(d) for d in data))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate_str():
print(','.join(str(d) for d in data))
def sample():
yield 'Is'
yield 'Chicago'
yield 'Not'
yield 'Chicago?'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(' '.join(parts))
def generate_str():
print(','.join(str(d) for d in data))
def sample():
yield 'Is'
yield 'Chicago'
yield 'Not'
yield 'Chicago?'
def combine(source, maxsize):
parts = []
size = 0
for part in source:
parts.append(part)
size += len(part)
if size > maxsize:
yield ''.join(parts)
parts = []
size = 0
yield ''.join(parts)
if __name__ == '__main__':
generate_str()
text = ','.join(sample())
print(text)
with open('combine.txt', 'w') as f:
for part in combine(sample(), 32768):
f.write(part)
<|reserved_special_token_1|>
parts = ['Is', 'Chicago', 'Not', 'Chicago?']
data = ['ACME', 50, 91.1]
print(' '.join(parts))
def generate_str():
print(','.join(str(d) for d in data))
def sample():
yield 'Is'
yield 'Chicago'
yield 'Not'
yield 'Chicago?'
def combine(source, maxsize):
parts = []
size = 0
for part in source:
parts.append(part)
size += len(part)
if size > maxsize:
yield ''.join(parts)
parts = []
size = 0
yield ''.join(parts)
if __name__ == '__main__':
generate_str()
text = ','.join(sample())
print(text)
with open('combine.txt', 'w') as f:
for part in combine(sample(), 32768):
f.write(part)
<|reserved_special_token_1|>
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# auther : xiaojinsong(61627515@qq.com)
parts = ['Is', 'Chicago', 'Not', 'Chicago?']
data = ['ACME', 50, 91.1]
print(' '.join(parts))
def generate_str():
print(','.join(str(d) for d in data))
def sample():
yield 'Is'
yield 'Chicago'
yield 'Not'
yield 'Chicago?'
def combine(source, maxsize):
parts = []
size = 0
for part in source:
parts.append(part)
size += len(part)
if size > maxsize:
yield ''.join(parts)
parts=[]
size = 0
yield ''.join(parts)
if __name__ == '__main__':
generate_str()
text = ','.join(sample())
print(text)
with open('combine.txt', 'w') as f:
for part in combine(sample(), 32768):
f.write(part)
|
flexible
|
{
"blob_id": "4ce1e802831f09e503d18fd287cb35400986e3c8",
"index": 8095,
"step-1": "<mask token>\n\n\ndef generate_str():\n print(','.join(str(d) for d in data))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_str():\n print(','.join(str(d) for d in data))\n\n\ndef sample():\n yield 'Is'\n yield 'Chicago'\n yield 'Not'\n yield 'Chicago?'\n\n\n<mask token>\n",
"step-3": "<mask token>\nprint(' '.join(parts))\n\n\ndef generate_str():\n print(','.join(str(d) for d in data))\n\n\ndef sample():\n yield 'Is'\n yield 'Chicago'\n yield 'Not'\n yield 'Chicago?'\n\n\ndef combine(source, maxsize):\n parts = []\n size = 0\n for part in source:\n parts.append(part)\n size += len(part)\n if size > maxsize:\n yield ''.join(parts)\n parts = []\n size = 0\n yield ''.join(parts)\n\n\nif __name__ == '__main__':\n generate_str()\n text = ','.join(sample())\n print(text)\n with open('combine.txt', 'w') as f:\n for part in combine(sample(), 32768):\n f.write(part)\n",
"step-4": "parts = ['Is', 'Chicago', 'Not', 'Chicago?']\ndata = ['ACME', 50, 91.1]\nprint(' '.join(parts))\n\n\ndef generate_str():\n print(','.join(str(d) for d in data))\n\n\ndef sample():\n yield 'Is'\n yield 'Chicago'\n yield 'Not'\n yield 'Chicago?'\n\n\ndef combine(source, maxsize):\n parts = []\n size = 0\n for part in source:\n parts.append(part)\n size += len(part)\n if size > maxsize:\n yield ''.join(parts)\n parts = []\n size = 0\n yield ''.join(parts)\n\n\nif __name__ == '__main__':\n generate_str()\n text = ','.join(sample())\n print(text)\n with open('combine.txt', 'w') as f:\n for part in combine(sample(), 32768):\n f.write(part)\n",
"step-5": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# auther : xiaojinsong(61627515@qq.com)\n\n\nparts = ['Is', 'Chicago', 'Not', 'Chicago?']\ndata = ['ACME', 50, 91.1]\nprint(' '.join(parts))\n\n\ndef generate_str():\n print(','.join(str(d) for d in data))\n\n\ndef sample():\n yield 'Is'\n yield 'Chicago'\n yield 'Not'\n yield 'Chicago?'\n\n\ndef combine(source, maxsize):\n parts = []\n size = 0\n for part in source:\n parts.append(part)\n size += len(part)\n if size > maxsize:\n yield ''.join(parts)\n parts=[]\n size = 0\n yield ''.join(parts)\n\n\nif __name__ == '__main__':\n generate_str()\n text = ','.join(sample())\n print(text)\n with open('combine.txt', 'w') as f:\n for part in combine(sample(), 32768):\n f.write(part)",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
# Generated by Django 3.0.1 on 2020-01-11 19:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20191230_2037'),
]
operations = [
migrations.AddField(
model_name='user',
name='circles',
field=models.CharField(choices=[('NUX', 'NUXPIA'), ('NET', 'NET'), ('DOT', 'DOT-GABI'), ('IMA', 'IMAGINE'), ('PNN', 'P&N'), ('MEG', 'MEGA-BRAIN')], max_length=18, null=True, verbose_name='동아리'),
),
migrations.AddField(
model_name='user',
name='department',
field=models.CharField(choices=[('OTHERS', '학부생이 아님'), ('CS', '컴퓨터공학부'), ('DRON', '드론IOT시뮬레이션학부'), ('MED', '의과대학'), ('LIB', '문리과대학'), ('SOC', '사회과학대학'), ('ENG', '공과대학'), ('HEL', '보건의료융합대학'), ('BNIT', 'BNIT융합대학'), ('PHA', '약학대학')], max_length=24, null=True, verbose_name='학과'),
),
migrations.AlterField(
model_name='user',
name='level',
field=models.CharField(choices=[('3', 'Lv3_미인증사용자'), ('2', 'Lv2_인증사용자'), ('1', 'Lv1_관리자'), ('0', 'Lv0_개발자')], default=3, max_length=18, verbose_name='등급'),
),
]
|
normal
|
{
"blob_id": "6aa762165dba891a3638d13862019dd342a7e05a",
"index": 7644,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('users', '0004_auto_20191230_2037')]\n operations = [migrations.AddField(model_name='user', name='circles',\n field=models.CharField(choices=[('NUX', 'NUXPIA'), ('NET', 'NET'),\n ('DOT', 'DOT-GABI'), ('IMA', 'IMAGINE'), ('PNN', 'P&N'), ('MEG',\n 'MEGA-BRAIN')], max_length=18, null=True, verbose_name='동아리')),\n migrations.AddField(model_name='user', name='department', field=\n models.CharField(choices=[('OTHERS', '학부생이 아님'), ('CS', '컴퓨터공학부'),\n ('DRON', '드론IOT시뮬레이션학부'), ('MED', '의과대학'), ('LIB', '문리과대학'), ('SOC',\n '사회과학대학'), ('ENG', '공과대학'), ('HEL', '보건의료융합대학'), ('BNIT',\n 'BNIT융합대학'), ('PHA', '약학대학')], max_length=24, null=True,\n verbose_name='학과')), migrations.AlterField(model_name='user', name=\n 'level', field=models.CharField(choices=[('3', 'Lv3_미인증사용자'), ('2',\n 'Lv2_인증사용자'), ('1', 'Lv1_관리자'), ('0', 'Lv0_개발자')], default=3,\n max_length=18, verbose_name='등급'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('users', '0004_auto_20191230_2037')]\n operations = [migrations.AddField(model_name='user', name='circles',\n field=models.CharField(choices=[('NUX', 'NUXPIA'), ('NET', 'NET'),\n ('DOT', 'DOT-GABI'), ('IMA', 'IMAGINE'), ('PNN', 'P&N'), ('MEG',\n 'MEGA-BRAIN')], max_length=18, null=True, verbose_name='동아리')),\n migrations.AddField(model_name='user', name='department', field=\n models.CharField(choices=[('OTHERS', '학부생이 아님'), ('CS', '컴퓨터공학부'),\n ('DRON', '드론IOT시뮬레이션학부'), ('MED', '의과대학'), ('LIB', '문리과대학'), ('SOC',\n '사회과학대학'), ('ENG', '공과대학'), ('HEL', '보건의료융합대학'), ('BNIT',\n 'BNIT융합대학'), ('PHA', '약학대학')], max_length=24, null=True,\n verbose_name='학과')), migrations.AlterField(model_name='user', name=\n 'level', field=models.CharField(choices=[('3', 'Lv3_미인증사용자'), ('2',\n 'Lv2_인증사용자'), ('1', 'Lv1_관리자'), ('0', 'Lv0_개발자')], default=3,\n max_length=18, verbose_name='등급'))]\n",
"step-5": "# Generated by Django 3.0.1 on 2020-01-11 19:59\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('users', '0004_auto_20191230_2037'),\r\n ]\r\n\r\n operations = [\r\n migrations.AddField(\r\n model_name='user',\r\n name='circles',\r\n field=models.CharField(choices=[('NUX', 'NUXPIA'), ('NET', 'NET'), ('DOT', 'DOT-GABI'), ('IMA', 'IMAGINE'), ('PNN', 'P&N'), ('MEG', 'MEGA-BRAIN')], max_length=18, null=True, verbose_name='동아리'),\r\n ),\r\n migrations.AddField(\r\n model_name='user',\r\n name='department',\r\n field=models.CharField(choices=[('OTHERS', '학부생이 아님'), ('CS', '컴퓨터공학부'), ('DRON', '드론IOT시뮬레이션학부'), ('MED', '의과대학'), ('LIB', '문리과대학'), ('SOC', '사회과학대학'), ('ENG', '공과대학'), ('HEL', '보건의료융합대학'), ('BNIT', 'BNIT융합대학'), ('PHA', '약학대학')], max_length=24, null=True, verbose_name='학과'),\r\n ),\r\n migrations.AlterField(\r\n model_name='user',\r\n name='level',\r\n field=models.CharField(choices=[('3', 'Lv3_미인증사용자'), ('2', 'Lv2_인증사용자'), ('1', 'Lv1_관리자'), ('0', 'Lv0_개발자')], default=3, max_length=18, verbose_name='등급'),\r\n ),\r\n ]\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class GoldpriceSpider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self):
self.browser = webdriver.PhantomJS()
self.price = None
def parse(self, response):
self.browser.get(response.url)
self.price = float(self.browser.find_element_by_xpath(
'//*[@id="J_price"]').text)
def close(self, spider, reason):
hour = datetime.datetime.now().hour
if self.price != None:
if int(hour) < 22:
if self.price > 278 or self.price < 270:
from scrapy.mail import MailSender
mailer = MailSender(smtphost='smtp.163.com', mailfrom=
'18607970065@163.com', smtpuser=
'18607970065@163.com', smtppass='yan18779865344',
smtpport=25)
body = (
u"""
实时爬取的黄金价格为:
"""
+ str(self.price))
subject = u'爬取的黄金实时价格'
mailer.send(to=['363918226@qq.com'], subject=subject.
encode('utf-8'), body=body.encode('utf-8'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GoldpriceSpider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self):
self.browser = webdriver.PhantomJS()
self.price = None
def parse(self, response):
self.browser.get(response.url)
self.price = float(self.browser.find_element_by_xpath(
'//*[@id="J_price"]').text)
def close(self, spider, reason):
hour = datetime.datetime.now().hour
if self.price != None:
if int(hour) < 22:
if self.price > 278 or self.price < 270:
from scrapy.mail import MailSender
mailer = MailSender(smtphost='smtp.163.com', mailfrom=
'18607970065@163.com', smtpuser=
'18607970065@163.com', smtppass='yan18779865344',
smtpport=25)
body = (
u"""
实时爬取的黄金价格为:
"""
+ str(self.price))
subject = u'爬取的黄金实时价格'
mailer.send(to=['363918226@qq.com'], subject=subject.
encode('utf-8'), body=body.encode('utf-8'))
def __del__(self):
self.browser.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GoldpriceSpider(scrapy.Spider):
name = 'goldprice'
allowed_domains = ['g-banker.com']
start_urls = ['https://g-banker.com/']
def __init__(self):
self.browser = webdriver.PhantomJS()
self.price = None
def parse(self, response):
self.browser.get(response.url)
self.price = float(self.browser.find_element_by_xpath(
'//*[@id="J_price"]').text)
def close(self, spider, reason):
hour = datetime.datetime.now().hour
if self.price != None:
if int(hour) < 22:
if self.price > 278 or self.price < 270:
from scrapy.mail import MailSender
mailer = MailSender(smtphost='smtp.163.com', mailfrom=
'18607970065@163.com', smtpuser=
'18607970065@163.com', smtppass='yan18779865344',
smtpport=25)
body = (
u"""
实时爬取的黄金价格为:
"""
+ str(self.price))
subject = u'爬取的黄金实时价格'
mailer.send(to=['363918226@qq.com'], subject=subject.
encode('utf-8'), body=body.encode('utf-8'))
def __del__(self):
self.browser.close()
<|reserved_special_token_1|>
import scrapy
from selenium import webdriver
import datetime
class GoldpriceSpider(scrapy.Spider):
name = 'goldprice'
allowed_domains = ['g-banker.com']
start_urls = ['https://g-banker.com/']
def __init__(self):
self.browser = webdriver.PhantomJS()
self.price = None
def parse(self, response):
self.browser.get(response.url)
self.price = float(self.browser.find_element_by_xpath(
'//*[@id="J_price"]').text)
def close(self, spider, reason):
hour = datetime.datetime.now().hour
if self.price != None:
if int(hour) < 22:
if self.price > 278 or self.price < 270:
from scrapy.mail import MailSender
mailer = MailSender(smtphost='smtp.163.com', mailfrom=
'18607970065@163.com', smtpuser=
'18607970065@163.com', smtppass='yan18779865344',
smtpport=25)
body = (
u"""
实时爬取的黄金价格为:
"""
+ str(self.price))
subject = u'爬取的黄金实时价格'
mailer.send(to=['363918226@qq.com'], subject=subject.
encode('utf-8'), body=body.encode('utf-8'))
def __del__(self):
self.browser.close()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import scrapy
from selenium import webdriver
import datetime
class GoldpriceSpider(scrapy.Spider):
name = 'goldprice'
allowed_domains = ['g-banker.com']
start_urls = ['https://g-banker.com/']
def __init__(self):
self.browser = webdriver.PhantomJS()
self.price = None
def parse(self, response):
# print response.text
self.browser.get(response.url)
self.price = float(self.browser.find_element_by_xpath('//*[@id="J_price"]').text)
def close(self,spider, reason):
hour = datetime.datetime.now().hour
if(self.price != None):
if int(hour) < 22:
if(self.price > 278 or self.price < 270):
from scrapy.mail import MailSender
# mailer = MailSender.from_settings(settings)# 出错了,没找到原因
mailer = MailSender(
smtphost = "smtp.163.com", # 发送邮件的服务器
mailfrom = "18607970065@163.com", # 邮件发送者
smtpuser = "18607970065@163.com", # 用户名
smtppass = "yan18779865344", # 发送邮箱的密码不是你注册时的密码,而是授权码!!!切记!
smtpport = 25 # 端口号
)
body = u"""
实时爬取的黄金价格为:
""" + str(self.price)
subject = u'爬取的黄金实时价格'
# 如果说发送的内容太过简单的话,很可能会被当做垃圾邮件给禁止发送。
mailer.send(to=["363918226@qq.com"], subject = subject.encode("utf-8"), body = body.encode("utf-8"))
def __del__(self):
self.browser.close()
|
flexible
|
{
"blob_id": "e59404149c739a40316ca16ab767cbc48aa9b685",
"index": 3526,
"step-1": "<mask token>\n\n\nclass GoldpriceSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self):\n self.browser = webdriver.PhantomJS()\n self.price = None\n\n def parse(self, response):\n self.browser.get(response.url)\n self.price = float(self.browser.find_element_by_xpath(\n '//*[@id=\"J_price\"]').text)\n\n def close(self, spider, reason):\n hour = datetime.datetime.now().hour\n if self.price != None:\n if int(hour) < 22:\n if self.price > 278 or self.price < 270:\n from scrapy.mail import MailSender\n mailer = MailSender(smtphost='smtp.163.com', mailfrom=\n '18607970065@163.com', smtpuser=\n '18607970065@163.com', smtppass='yan18779865344',\n smtpport=25)\n body = (\n u\"\"\"\n 实时爬取的黄金价格为:\n \"\"\"\n + str(self.price))\n subject = u'爬取的黄金实时价格'\n mailer.send(to=['363918226@qq.com'], subject=subject.\n encode('utf-8'), body=body.encode('utf-8'))\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GoldpriceSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self):\n self.browser = webdriver.PhantomJS()\n self.price = None\n\n def parse(self, response):\n self.browser.get(response.url)\n self.price = float(self.browser.find_element_by_xpath(\n '//*[@id=\"J_price\"]').text)\n\n def close(self, spider, reason):\n hour = datetime.datetime.now().hour\n if self.price != None:\n if int(hour) < 22:\n if self.price > 278 or self.price < 270:\n from scrapy.mail import MailSender\n mailer = MailSender(smtphost='smtp.163.com', mailfrom=\n '18607970065@163.com', smtpuser=\n '18607970065@163.com', smtppass='yan18779865344',\n smtpport=25)\n body = (\n u\"\"\"\n 实时爬取的黄金价格为:\n \"\"\"\n + str(self.price))\n subject = u'爬取的黄金实时价格'\n mailer.send(to=['363918226@qq.com'], subject=subject.\n encode('utf-8'), body=body.encode('utf-8'))\n\n def __del__(self):\n self.browser.close()\n",
"step-3": "<mask token>\n\n\nclass GoldpriceSpider(scrapy.Spider):\n name = 'goldprice'\n allowed_domains = ['g-banker.com']\n start_urls = ['https://g-banker.com/']\n\n def __init__(self):\n self.browser = webdriver.PhantomJS()\n self.price = None\n\n def parse(self, response):\n self.browser.get(response.url)\n self.price = float(self.browser.find_element_by_xpath(\n '//*[@id=\"J_price\"]').text)\n\n def close(self, spider, reason):\n hour = datetime.datetime.now().hour\n if self.price != None:\n if int(hour) < 22:\n if self.price > 278 or self.price < 270:\n from scrapy.mail import MailSender\n mailer = MailSender(smtphost='smtp.163.com', mailfrom=\n '18607970065@163.com', smtpuser=\n '18607970065@163.com', smtppass='yan18779865344',\n smtpport=25)\n body = (\n u\"\"\"\n 实时爬取的黄金价格为:\n \"\"\"\n + str(self.price))\n subject = u'爬取的黄金实时价格'\n mailer.send(to=['363918226@qq.com'], subject=subject.\n encode('utf-8'), body=body.encode('utf-8'))\n\n def __del__(self):\n self.browser.close()\n",
"step-4": "import scrapy\nfrom selenium import webdriver\nimport datetime\n\n\nclass GoldpriceSpider(scrapy.Spider):\n name = 'goldprice'\n allowed_domains = ['g-banker.com']\n start_urls = ['https://g-banker.com/']\n\n def __init__(self):\n self.browser = webdriver.PhantomJS()\n self.price = None\n\n def parse(self, response):\n self.browser.get(response.url)\n self.price = float(self.browser.find_element_by_xpath(\n '//*[@id=\"J_price\"]').text)\n\n def close(self, spider, reason):\n hour = datetime.datetime.now().hour\n if self.price != None:\n if int(hour) < 22:\n if self.price > 278 or self.price < 270:\n from scrapy.mail import MailSender\n mailer = MailSender(smtphost='smtp.163.com', mailfrom=\n '18607970065@163.com', smtpuser=\n '18607970065@163.com', smtppass='yan18779865344',\n smtpport=25)\n body = (\n u\"\"\"\n 实时爬取的黄金价格为:\n \"\"\"\n + str(self.price))\n subject = u'爬取的黄金实时价格'\n mailer.send(to=['363918226@qq.com'], subject=subject.\n encode('utf-8'), body=body.encode('utf-8'))\n\n def __del__(self):\n self.browser.close()\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport scrapy\nfrom selenium import webdriver\nimport datetime\n\nclass GoldpriceSpider(scrapy.Spider):\n name = 'goldprice'\n allowed_domains = ['g-banker.com']\n start_urls = ['https://g-banker.com/']\n\n def __init__(self):\n self.browser = webdriver.PhantomJS()\n self.price = None\n\n def parse(self, response):\n # print response.text\n self.browser.get(response.url)\n self.price = float(self.browser.find_element_by_xpath('//*[@id=\"J_price\"]').text)\n\n\n def close(self,spider, reason):\n hour = datetime.datetime.now().hour\n if(self.price != None):\n if int(hour) < 22:\n if(self.price > 278 or self.price < 270):\n from scrapy.mail import MailSender\n # mailer = MailSender.from_settings(settings)# 出错了,没找到原因\n mailer = MailSender(\n smtphost = \"smtp.163.com\", # 发送邮件的服务器\n mailfrom = \"18607970065@163.com\", # 邮件发送者\n smtpuser = \"18607970065@163.com\", # 用户名\n smtppass = \"yan18779865344\", # 发送邮箱的密码不是你注册时的密码,而是授权码!!!切记!\n smtpport = 25 # 端口号\n )\n\n body = u\"\"\"\n 实时爬取的黄金价格为:\n \"\"\" + str(self.price)\n subject = u'爬取的黄金实时价格'\n # 如果说发送的内容太过简单的话,很可能会被当做垃圾邮件给禁止发送。\n mailer.send(to=[\"363918226@qq.com\"], subject = subject.encode(\"utf-8\"), body = body.encode(\"utf-8\"))\n\n def __del__(self):\n self.browser.close()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
a = int(input('masukkan nilai = '))
if a > 60:
status = 'LULUS'
elif a <= 60:
status = 'TIDAK LULUS'
print(status)
ulang = input('apakah anda ingin mengulang? y/n = ')
<|reserved_special_token_1|>
ulang = 'y'
while True:
a = int(input('masukkan nilai = '))
if a > 60:
status = 'LULUS'
elif a <= 60:
status = 'TIDAK LULUS'
print(status)
ulang = input('apakah anda ingin mengulang? y/n = ')
<|reserved_special_token_1|>
ulang = 'y'
while True :
a = int(input ("masukkan nilai = "))
if a > 60 :
status = "LULUS"
elif a <= 60 :
status = "TIDAK LULUS"
print(status)
ulang = input("apakah anda ingin mengulang? y/n = ")
|
flexible
|
{
"blob_id": "759b440bf436afbfb081cf55eeb4a0f075ed3e6d",
"index": 9577,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n a = int(input('masukkan nilai = '))\n if a > 60:\n status = 'LULUS'\n elif a <= 60:\n status = 'TIDAK LULUS'\n print(status)\n ulang = input('apakah anda ingin mengulang? y/n = ')\n",
"step-3": "ulang = 'y'\nwhile True:\n a = int(input('masukkan nilai = '))\n if a > 60:\n status = 'LULUS'\n elif a <= 60:\n status = 'TIDAK LULUS'\n print(status)\n ulang = input('apakah anda ingin mengulang? y/n = ')\n",
"step-4": "ulang = 'y'\r\nwhile True :\r\n\ta = int(input (\"masukkan nilai = \"))\r\n\r\n\tif a > 60 :\r\n\t\tstatus = \"LULUS\"\r\n\telif a <= 60 :\r\n\t\tstatus = \"TIDAK LULUS\"\r\n\tprint(status)\r\n\r\n\tulang = input(\"apakah anda ingin mengulang? y/n = \")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def fun1(fun):
return "Hai!!!! "+fun
def message():
return "How are you"
res = fun1(message())
print(res)
|
normal
|
{
"blob_id": "e9fff1fb0a79493d4d7f3417c7d554eb10a978a0",
"index": 6616,
"step-1": "<mask token>\n",
"step-2": "def fun1(fun):\n return 'Hai!!!! ' + fun\n\n\ndef message():\n return 'How are you'\n\n\n<mask token>\n",
"step-3": "def fun1(fun):\n return 'Hai!!!! ' + fun\n\n\ndef message():\n return 'How are you'\n\n\n<mask token>\nprint(res)\n",
"step-4": "def fun1(fun):\n return 'Hai!!!! ' + fun\n\n\ndef message():\n return 'How are you'\n\n\nres = fun1(message())\nprint(res)\n",
"step-5": "def fun1(fun):\r\n return \"Hai!!!! \"+fun\r\ndef message():\r\n return \"How are you\"\r\n\r\nres = fun1(message())\r\nprint(res)\r\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def byGuide(data, val=None, test=None):
val_guides = val
if val == None:
val_guides = ['GGGTGGGGGGAGTTTGCTCCTGG', 'GACCCCCTCCACCCCGCCTCCGG',
'GGCCTCCCCAAAGCCTGGCCAGG', 'GAACACAAAGCATAGACTGCGGG']
test_guides = test
if test == None:
test_guides = ['GCAAAACTCAACCCTACCCCAGG', 'GGCCCAGACTGAGCACGTGATGG',
'GGGAAAGACCCAGCATCCGTGGG', 'GGAATCCCTTCTGCAGCACCTGG',
'GTGAGTGAGTGTGTGCGTGTGGG', 'GATGATGATGCCCCGGGCGTTGG',
'GCCGGAGGGGTTTGCACAGAAGG']
train_set = []
val_set = []
test_set = []
for pair in data:
pair['off'] = torch.tensor([1.0, 0.0])
if pair['grna_target_sequence'] in val_guides:
val_set.append(pair)
elif pair['grna_target_sequence'] in test_guides:
test_set.append(pair)
else:
train_set.append(pair)
return [train_set, val_set, test_set]
def byTarget(data, train=0.7, val=0.1, test=0.2):
random.shuffle(data)
train_set = []
val_set = []
test_set = []
for i in range(len(data)):
if i <= len(data) * train:
train_set.append(data[i])
elif i <= len(data) * (train + val):
val_set.append(data[i])
else:
test_set.append(data[i])
return [train_set, val_set, test_set]
def byStudy(data, val=None, test=None):
val_studies = val
if val == None:
val_studies = ['Anderson', 'Ran']
test_studies = test
if test == None:
test_studies = ['Kim', 'Tsai', 'Cho']
train_set = []
val_set = []
test_set = []
for pair in data:
pair['off'] = torch.tensor([1.0, 0.0])
if pair['study_name'] in val_studies:
val_set.append(pair)
elif pair['study_name'] in test_studies:
test_set.append(pair)
else:
train_set.append(pair)
return [train_set, val_set, test_set]
def one_hot(data, sign='+'):
sins = None
sequence = None
data = data.lower()
for n in data:
one_hot = torch.zeros((1, 4))
if n == 'a':
one_hot[0][0] = 1
elif n == 'c':
one_hot[0][1] = 1
elif n == 'g':
one_hot[0][2] = 1
elif n == 't':
one_hot[0][3] = 1
if sins == None:
sequence = copy.deepcopy(one_hot)
sins = 1
else:
sequence = torch.cat((sequence, one_hot), dim=0)
if list(sequence.size())[0] < 23:
for i in range(23 - list(sequence.size())[0]):
sequence = torch.cat((sequence, torch.zeros((1, 4))), dim=0)
if list(sequence.size())[0] > 23:
sequence = sequence[:23]
if sign == '-':
sequence = torch.flip(sequence, [1])
return sequence
<|reserved_special_token_0|>
class CRISPRDataset(torch.utils.data.Dataset):
def __init__(self, thisdata):
self.thisdata = thisdata
def __len__(self):
return len(self.thisdata)
def __getitem__(self, idx):
item = self.thisdata[idx]
sample = {'target': torch.squeeze(item[0][1]).unsqueeze_(dim=0),
'guide': torch.squeeze(item[0][0]).unsqueeze_(dim=0), 'cfd':
torch.squeeze(item[1]).unsqueeze_(dim=0)}
return sample
def collate_fn(batch):
output = {}
b = {key: [] for key in batch[0].keys()}
for i in batch:
if sum(list(i['cfd'].shape)) > 0 and sum(list(i['target'].shape)
) > 0 and sum(list(i['guide'].shape)) > 0:
for key in i.keys():
b[key].append(i[key])
else:
print('1', sum(list(i['cfd'].shape)), i['cfd'])
print('2', sum(list(i['target'].shape)), len(i['target'].shape),
i['target'].tolist())
print('3', sum(list(i['guide'].shape)), len(i['guide'].shape))
for key in b.keys():
if len(b[key]) > 0:
output[key] = torch.stack(b[key])
else:
output[key] = torch.tensor([])
return output
<|reserved_special_token_0|>
def rankDataLoader(file='crisprsql.csv', batch=64, mode='target'):
ftime = time.monotonic()
with open(file) as f:
d = list(csv.DictReader(f))
if mode == 'study':
loadData = byStudy(d)
elif mode == 'guide':
loadData = byGuide(d)
else:
loadData = byTarget(d)
data = list()
dl = list()
train = True
ranks = list()
for line in d:
if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
ranks.append(float(line['cleavage_freq']))
ranks.sort()
for t in range(3):
df = pd.DataFrame(loadData[t])
pd.to_numeric(df.cleavage_freq, errors='coerce')
df.dropna(subset=['cleavage_freq'], inplace=True)
print(df.head())
average_value = list()
thisdata = list()
for line in df.to_dict('records'):
if line['cleavage_freq'] != '' and float(line['cleavage_freq']
) >= 0:
thisdata.append([[one_hot(line['grna_target_sequence'],
line['grna_target_strand']), one_hot(line[
'target_sequence'], line['target_strand'])], torch.
tensor(ranks.index(float(line['cleavage_freq'])) / len(
ranks))])
average_value.append(float(line['cleavage_freq']))
if train == True:
dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
batch, True, collate_fn=collate_fn, num_workers=1 if torch.
cuda.is_available() else 0))
train = False
else:
dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
batch, False, collate_fn=collate_fn, num_workers=1 if torch
.cuda.is_available() else 0))
thisdata1 = list()
for i in range(int(len(thisdata) / batch)):
ones = None
twos = None
threes = None
for j in range(batch):
if ones == None:
ones = thisdata[i * batch + j][0][0].unsqueeze_(0
).unsqueeze_(0)
twos = thisdata[i * batch + j][0][1].unsqueeze_(0
).unsqueeze_(0)
threes = thisdata[i * batch + j][1].unsqueeze_(0)
else:
ones = torch.cat((ones, thisdata[i * batch + j][0][0].
unsqueeze_(0).unsqueeze_(0)), dim=0)
twos = torch.cat((twos, thisdata[i * batch + j][0][1].
unsqueeze_(0).unsqueeze_(0)), dim=0)
threes = torch.cat((threes, thisdata[i * batch + j][1].
unsqueeze_(0)), dim=0)
thisdata1.append([[ones, twos], threes])
data.append(thisdata1)
print('time to load data: ', time.monotonic() - ftime, 'seconds')
return [data, dl]
def fullDataLoader(file='augmentcrisprsql.csv', batch=64, mode='target',
target='rank'):
ftime = time.monotonic()
with open(file) as f:
d = list(csv.DictReader(f))
random.shuffle(d)
if mode == 'study':
loadData = byStudy(d)
elif mode == 'guide':
loadData = byGuide(d)
else:
loadData = byTarget(d)
data = list()
dl = list()
train = True
for t in range(3):
average_value = list()
thisdata = list()
q = 0
for line in loadData[t]:
if line['cleavage_freq'] != '' and float(line['cleavage_freq']
) >= 0:
if target == 'regular':
label = float(line['cleavage_freq'])
elif target == 'rank':
label = [float(line['ranked_cleavage_freq'])]
else:
label = [0, 1] if float(line['threshhold_cleavage_freq']
) == 0 else [1, 0]
if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(
one_hot(line['grna_target_sequence'], line[
'grna_target_strand']).shape)) > 0 and sum(list(one_hot
(line['target_sequence'], line['target_strand']).shape)
) > 0:
thisdata.append([[one_hot(line['grna_target_sequence'],
line['grna_target_strand']), one_hot(line[
'target_sequence'], line['target_strand'])], torch.
tensor(label)])
average_value.append(label)
else:
q += 1
print(sum(list(torch.tensor([label]).shape)), sum(list(
one_hot(line['grna_target_sequence'], line[
'grna_target_strand']).shape)), sum(list(one_hot(
line['target_sequence'], line['target_strand']).shape))
)
print(q)
if train == True:
dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
batch, True, collate_fn=collate_fn, num_workers=4))
train = False
else:
dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
batch, False, collate_fn=collate_fn, num_workers=4))
thisdata1 = list()
for i in range(int(len(thisdata) / batch)):
ones = None
twos = None
threes = None
for j in range(batch):
if ones == None:
ones = thisdata[i * batch + j][0][0].unsqueeze_(0
).unsqueeze_(0)
twos = thisdata[i * batch + j][0][1].unsqueeze_(0
).unsqueeze_(0)
threes = thisdata[i * batch + j][1].unsqueeze_(0)
else:
ones = torch.cat((ones, thisdata[i * batch + j][0][0].
unsqueeze_(0).unsqueeze_(0)), dim=0)
twos = torch.cat((twos, thisdata[i * batch + j][0][1].
unsqueeze_(0).unsqueeze_(0)), dim=0)
threes = torch.cat((threes, thisdata[i * batch + j][1].
unsqueeze_(0)), dim=0)
thisdata1.append([[ones, twos], threes])
data.append(thisdata1)
print('time to load data: ', time.monotonic() - ftime, 'seconds')
return [data, dl]
<|reserved_special_token_0|>
def roc(labels, outputs):
llabels = labels.flatten().tolist()
loutputs = outputs.flatten().tolist()
average_values = dict()
for i in range(1, 2):
thislabel = list()
thisoutput = list()
pres = 0
totalpres = 0
for j in range(len(llabels)):
if llabels[j] <= 0.01 / i:
thislabel.append(0)
else:
thislabel.append(1)
if loutputs[j] <= 0.01 / i:
thisoutput.append(0)
else:
thisoutput.append(1)
if thislabel[-1] == thisoutput[-1]:
pres += 1
totalpres += 1
lr_precision, lr_recall, _ = precision_recall_curve(thislabel,
thisoutput)
average_values[0.1 / i] = [roc_auc_score(thislabel, thisoutput),
auc(lr_recall, lr_precision), pres / totalpres]
return average_values
def accuracy(labels, outputs, percent=0.1):
llabels = labels.flatten().tolist()
loutputs = outputs.flatten().tolist()
correct = 0
total = 0
for i in range(len(llabels)):
if llabels[i] * (1 - percent) <= loutputs[i] and llabels[i] * (1 +
percent) >= loutputs[i]:
correct += 1
total += 1
return correct / total
<|reserved_special_token_0|>
def Test(net, dataset, device, crit, logpath=None):
net.eval()
correct = 0
total = 0
totalloss = 0
loss = 0
with torch.no_grad():
for i, data in enumerate(dataset, 0):
inputs, labels = data[0], data[1].to(device)
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
totalloss += 1
correct += (predicted == labels).sum().item()
loss += crit(outputs, labels)
if logpath != None:
f = open(logpath, 'w')
f.write('Accuracy of the network on the 10000 test images: %d %%' %
(100 * correct / total))
f.write(f'total: {total} correct: {correct}')
f.write(f'loss: {loss / totalloss}')
f.close()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 *
correct / total))
print(f'total: {total} correct: {correct}')
print(f'loss: {loss / totalloss}')
return 100 * correct / total
def getAllStudy():
with open('crisprsql.csv') as f:
data = csv.DictReader(f)
alls = dict()
for row in data:
if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:
try:
alls[row['study_name']].add(row['grna_target_sequence'])
except KeyError:
alls[row['study_name']] = set(row['grna_target_sequence'])
for r in alls:
print(r)
print(alls[r])
print(len(alls[r]))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def byGuide(data, val=None, test=None):
val_guides = val
if val == None:
val_guides = ['GGGTGGGGGGAGTTTGCTCCTGG', 'GACCCCCTCCACCCCGCCTCCGG',
'GGCCTCCCCAAAGCCTGGCCAGG', 'GAACACAAAGCATAGACTGCGGG']
test_guides = test
if test == None:
test_guides = ['GCAAAACTCAACCCTACCCCAGG', 'GGCCCAGACTGAGCACGTGATGG',
'GGGAAAGACCCAGCATCCGTGGG', 'GGAATCCCTTCTGCAGCACCTGG',
'GTGAGTGAGTGTGTGCGTGTGGG', 'GATGATGATGCCCCGGGCGTTGG',
'GCCGGAGGGGTTTGCACAGAAGG']
train_set = []
val_set = []
test_set = []
for pair in data:
pair['off'] = torch.tensor([1.0, 0.0])
if pair['grna_target_sequence'] in val_guides:
val_set.append(pair)
elif pair['grna_target_sequence'] in test_guides:
test_set.append(pair)
else:
train_set.append(pair)
return [train_set, val_set, test_set]
def byTarget(data, train=0.7, val=0.1, test=0.2):
random.shuffle(data)
train_set = []
val_set = []
test_set = []
for i in range(len(data)):
if i <= len(data) * train:
train_set.append(data[i])
elif i <= len(data) * (train + val):
val_set.append(data[i])
else:
test_set.append(data[i])
return [train_set, val_set, test_set]
def byStudy(data, val=None, test=None):
val_studies = val
if val == None:
val_studies = ['Anderson', 'Ran']
test_studies = test
if test == None:
test_studies = ['Kim', 'Tsai', 'Cho']
train_set = []
val_set = []
test_set = []
for pair in data:
pair['off'] = torch.tensor([1.0, 0.0])
if pair['study_name'] in val_studies:
val_set.append(pair)
elif pair['study_name'] in test_studies:
test_set.append(pair)
else:
train_set.append(pair)
return [train_set, val_set, test_set]
def one_hot(data, sign='+'):
sins = None
sequence = None
data = data.lower()
for n in data:
one_hot = torch.zeros((1, 4))
if n == 'a':
one_hot[0][0] = 1
elif n == 'c':
one_hot[0][1] = 1
elif n == 'g':
one_hot[0][2] = 1
elif n == 't':
one_hot[0][3] = 1
if sins == None:
sequence = copy.deepcopy(one_hot)
sins = 1
else:
sequence = torch.cat((sequence, one_hot), dim=0)
if list(sequence.size())[0] < 23:
for i in range(23 - list(sequence.size())[0]):
sequence = torch.cat((sequence, torch.zeros((1, 4))), dim=0)
if list(sequence.size())[0] > 23:
sequence = sequence[:23]
if sign == '-':
sequence = torch.flip(sequence, [1])
return sequence
<|reserved_special_token_0|>
class CRISPRDataset(torch.utils.data.Dataset):
def __init__(self, thisdata):
self.thisdata = thisdata
def __len__(self):
return len(self.thisdata)
def __getitem__(self, idx):
item = self.thisdata[idx]
sample = {'target': torch.squeeze(item[0][1]).unsqueeze_(dim=0),
'guide': torch.squeeze(item[0][0]).unsqueeze_(dim=0), 'cfd':
torch.squeeze(item[1]).unsqueeze_(dim=0)}
return sample
def collate_fn(batch):
output = {}
b = {key: [] for key in batch[0].keys()}
for i in batch:
if sum(list(i['cfd'].shape)) > 0 and sum(list(i['target'].shape)
) > 0 and sum(list(i['guide'].shape)) > 0:
for key in i.keys():
b[key].append(i[key])
else:
print('1', sum(list(i['cfd'].shape)), i['cfd'])
print('2', sum(list(i['target'].shape)), len(i['target'].shape),
i['target'].tolist())
print('3', sum(list(i['guide'].shape)), len(i['guide'].shape))
for key in b.keys():
if len(b[key]) > 0:
output[key] = torch.stack(b[key])
else:
output[key] = torch.tensor([])
return output
<|reserved_special_token_0|>
def rankDataLoader(file='crisprsql.csv', batch=64, mode='target'):
ftime = time.monotonic()
with open(file) as f:
d = list(csv.DictReader(f))
if mode == 'study':
loadData = byStudy(d)
elif mode == 'guide':
loadData = byGuide(d)
else:
loadData = byTarget(d)
data = list()
dl = list()
train = True
ranks = list()
for line in d:
if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
ranks.append(float(line['cleavage_freq']))
ranks.sort()
for t in range(3):
df = pd.DataFrame(loadData[t])
pd.to_numeric(df.cleavage_freq, errors='coerce')
df.dropna(subset=['cleavage_freq'], inplace=True)
print(df.head())
average_value = list()
thisdata = list()
for line in df.to_dict('records'):
if line['cleavage_freq'] != '' and float(line['cleavage_freq']
) >= 0:
thisdata.append([[one_hot(line['grna_target_sequence'],
line['grna_target_strand']), one_hot(line[
'target_sequence'], line['target_strand'])], torch.
tensor(ranks.index(float(line['cleavage_freq'])) / len(
ranks))])
average_value.append(float(line['cleavage_freq']))
if train == True:
dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
batch, True, collate_fn=collate_fn, num_workers=1 if torch.
cuda.is_available() else 0))
train = False
else:
dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
batch, False, collate_fn=collate_fn, num_workers=1 if torch
.cuda.is_available() else 0))
thisdata1 = list()
for i in range(int(len(thisdata) / batch)):
ones = None
twos = None
threes = None
for j in range(batch):
if ones == None:
ones = thisdata[i * batch + j][0][0].unsqueeze_(0
).unsqueeze_(0)
twos = thisdata[i * batch + j][0][1].unsqueeze_(0
).unsqueeze_(0)
threes = thisdata[i * batch + j][1].unsqueeze_(0)
else:
ones = torch.cat((ones, thisdata[i * batch + j][0][0].
unsqueeze_(0).unsqueeze_(0)), dim=0)
twos = torch.cat((twos, thisdata[i * batch + j][0][1].
unsqueeze_(0).unsqueeze_(0)), dim=0)
threes = torch.cat((threes, thisdata[i * batch + j][1].
unsqueeze_(0)), dim=0)
thisdata1.append([[ones, twos], threes])
data.append(thisdata1)
print('time to load data: ', time.monotonic() - ftime, 'seconds')
return [data, dl]
def fullDataLoader(file='augmentcrisprsql.csv', batch=64, mode='target',
target='rank'):
ftime = time.monotonic()
with open(file) as f:
d = list(csv.DictReader(f))
random.shuffle(d)
if mode == 'study':
loadData = byStudy(d)
elif mode == 'guide':
loadData = byGuide(d)
else:
loadData = byTarget(d)
data = list()
dl = list()
train = True
for t in range(3):
average_value = list()
thisdata = list()
q = 0
for line in loadData[t]:
if line['cleavage_freq'] != '' and float(line['cleavage_freq']
) >= 0:
if target == 'regular':
label = float(line['cleavage_freq'])
elif target == 'rank':
label = [float(line['ranked_cleavage_freq'])]
else:
label = [0, 1] if float(line['threshhold_cleavage_freq']
) == 0 else [1, 0]
if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(
one_hot(line['grna_target_sequence'], line[
'grna_target_strand']).shape)) > 0 and sum(list(one_hot
(line['target_sequence'], line['target_strand']).shape)
) > 0:
thisdata.append([[one_hot(line['grna_target_sequence'],
line['grna_target_strand']), one_hot(line[
'target_sequence'], line['target_strand'])], torch.
tensor(label)])
average_value.append(label)
else:
q += 1
print(sum(list(torch.tensor([label]).shape)), sum(list(
one_hot(line['grna_target_sequence'], line[
'grna_target_strand']).shape)), sum(list(one_hot(
line['target_sequence'], line['target_strand']).shape))
)
print(q)
if train == True:
dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
batch, True, collate_fn=collate_fn, num_workers=4))
train = False
else:
dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
batch, False, collate_fn=collate_fn, num_workers=4))
thisdata1 = list()
for i in range(int(len(thisdata) / batch)):
ones = None
twos = None
threes = None
for j in range(batch):
if ones == None:
ones = thisdata[i * batch + j][0][0].unsqueeze_(0
).unsqueeze_(0)
twos = thisdata[i * batch + j][0][1].unsqueeze_(0
).unsqueeze_(0)
threes = thisdata[i * batch + j][1].unsqueeze_(0)
else:
ones = torch.cat((ones, thisdata[i * batch + j][0][0].
unsqueeze_(0).unsqueeze_(0)), dim=0)
twos = torch.cat((twos, thisdata[i * batch + j][0][1].
unsqueeze_(0).unsqueeze_(0)), dim=0)
threes = torch.cat((threes, thisdata[i * batch + j][1].
unsqueeze_(0)), dim=0)
thisdata1.append([[ones, twos], threes])
data.append(thisdata1)
print('time to load data: ', time.monotonic() - ftime, 'seconds')
return [data, dl]
<|reserved_special_token_0|>
def roc(labels, outputs):
llabels = labels.flatten().tolist()
loutputs = outputs.flatten().tolist()
average_values = dict()
for i in range(1, 2):
thislabel = list()
thisoutput = list()
pres = 0
totalpres = 0
for j in range(len(llabels)):
if llabels[j] <= 0.01 / i:
thislabel.append(0)
else:
thislabel.append(1)
if loutputs[j] <= 0.01 / i:
thisoutput.append(0)
else:
thisoutput.append(1)
if thislabel[-1] == thisoutput[-1]:
pres += 1
totalpres += 1
lr_precision, lr_recall, _ = precision_recall_curve(thislabel,
thisoutput)
average_values[0.1 / i] = [roc_auc_score(thislabel, thisoutput),
auc(lr_recall, lr_precision), pres / totalpres]
return average_values
def accuracy(labels, outputs, percent=0.1):
llabels = labels.flatten().tolist()
loutputs = outputs.flatten().tolist()
correct = 0
total = 0
for i in range(len(llabels)):
if llabels[i] * (1 - percent) <= loutputs[i] and llabels[i] * (1 +
percent) >= loutputs[i]:
correct += 1
total += 1
return correct / total
<|reserved_special_token_0|>
def Test(net, dataset, device, crit, logpath=None):
net.eval()
correct = 0
total = 0
totalloss = 0
loss = 0
with torch.no_grad():
for i, data in enumerate(dataset, 0):
inputs, labels = data[0], data[1].to(device)
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
totalloss += 1
correct += (predicted == labels).sum().item()
loss += crit(outputs, labels)
if logpath != None:
f = open(logpath, 'w')
f.write('Accuracy of the network on the 10000 test images: %d %%' %
(100 * correct / total))
f.write(f'total: {total} correct: {correct}')
f.write(f'loss: {loss / totalloss}')
f.close()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 *
correct / total))
print(f'total: {total} correct: {correct}')
print(f'loss: {loss / totalloss}')
return 100 * correct / total
def getAllStudy():
    """Print, per study in crisprsql.csv, its set of distinct guide sequences.

    Rows whose grna_target_sequence is a lone base letter ('C','G','A','T')
    are treated as junk and skipped.
    """
    with open('crisprsql.csv') as f:
        data = csv.DictReader(f)
        alls = dict()
        for row in data:
            if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:
                try:
                    alls[row['study_name']].add(row['grna_target_sequence'])
                except KeyError:
                    # BUG FIX: set(seq) built a set of the sequence's
                    # *characters*; use a one-element set of the sequence.
                    alls[row['study_name']] = {row['grna_target_sequence']}
        for r in alls:
            print(r)
            print(alls[r])
            print(len(alls[r]))
<|reserved_special_token_0|>
def aboveandbelow(threshold):
    """Print the fractions of crisprsql.csv rows with cleavage_freq above
    and below `threshold`, skipping junk single-base guide rows and rows
    with an empty cleavage_freq.
    """
    with open('crisprsql.csv') as f:
        data = csv.DictReader(f)
        above = 0
        total = 0
        for row in data:
            if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T'] and row[
                'cleavage_freq'] != '':
                if float(row['cleavage_freq']) > threshold:
                    above += 1
                total += 1
    # Removed the unused `alls` dict; added the missing '%' after the Below
    # figure so both fractions are labelled consistently.
    print(f'Above: {above / total}%. Below: {(total - above) / total}%')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def byGuide(data, val=None, test=None):
    """Split rows into train/val/test by their guide (grna_target_sequence).

    `val`/`test` may supply explicit guide lists; otherwise hard-coded
    defaults are used. Every row gets an 'off' one-hot tensor added.
    Returns [train_set, val_set, test_set].
    """
    default_val = ['GGGTGGGGGGAGTTTGCTCCTGG', 'GACCCCCTCCACCCCGCCTCCGG',
        'GGCCTCCCCAAAGCCTGGCCAGG', 'GAACACAAAGCATAGACTGCGGG']
    default_test = ['GCAAAACTCAACCCTACCCCAGG', 'GGCCCAGACTGAGCACGTGATGG',
        'GGGAAAGACCCAGCATCCGTGGG', 'GGAATCCCTTCTGCAGCACCTGG',
        'GTGAGTGAGTGTGTGCGTGTGGG', 'GATGATGATGCCCCGGGCGTTGG',
        'GCCGGAGGGGTTTGCACAGAAGG']
    val_guides = default_val if val is None else val
    test_guides = default_test if test is None else test
    train_set = []
    val_set = []
    test_set = []
    for pair in data:
        pair['off'] = torch.tensor([1.0, 0.0])
        guide = pair['grna_target_sequence']
        if guide in val_guides:
            val_set.append(pair)
        elif guide in test_guides:
            test_set.append(pair)
        else:
            train_set.append(pair)
    return [train_set, val_set, test_set]
def byTarget(data, train=0.7, val=0.1, test=0.2):
    """Shuffle `data` in place and split it positionally into train/val/test.

    Indices <= len*train go to train (boundary inclusive), the next slice
    up to len*(train+val) to val, the rest to test. The `test` argument is
    unused; the test split is simply the remainder.
    """
    random.shuffle(data)
    train_set, val_set, test_set = [], [], []
    n = len(data)
    for idx, row in enumerate(data):
        if idx <= n * train:
            train_set.append(row)
        elif idx <= n * (train + val):
            val_set.append(row)
        else:
            test_set.append(row)
    return [train_set, val_set, test_set]
def byStudy(data, val=None, test=None):
    """Split rows into train/val/test by their study_name.

    Defaults: val = Anderson/Ran, test = Kim/Tsai/Cho. Each row gets an
    'off' one-hot tensor added. Returns [train_set, val_set, test_set].
    """
    val_studies = ['Anderson', 'Ran'] if val is None else val
    test_studies = ['Kim', 'Tsai', 'Cho'] if test is None else test
    train_set, val_set, test_set = [], [], []
    for pair in data:
        pair['off'] = torch.tensor([1.0, 0.0])
        study = pair['study_name']
        if study in val_studies:
            val_set.append(pair)
        elif study in test_studies:
            test_set.append(pair)
        else:
            train_set.append(pair)
    return [train_set, val_set, test_set]
def one_hot(data, sign='+'):
    """One-hot encode a nucleotide string into a (23, 4) tensor.

    Column order is A, C, G, T; unrecognized characters become all-zero
    rows. The result is zero-padded or truncated to exactly 23 rows.
    For the '-' strand the tensor is flipped along dim 1 (the channel
    axis) -- NOTE(review): flipping channels rather than positions is kept
    as in the original; worth confirming against the data pipeline.
    """
    column = {'a': 0, 'c': 1, 'g': 2, 't': 3}
    rows = []
    for ch in data.lower():
        row = torch.zeros((1, 4))
        if ch in column:
            row[0][column[ch]] = 1
        rows.append(row)
    sequence = torch.cat(rows, dim=0)
    length = list(sequence.size())[0]
    if length < 23:
        sequence = torch.cat((sequence, torch.zeros((23 - length, 4))), dim=0)
    if list(sequence.size())[0] > 23:
        sequence = sequence[:23]
    if sign == '-':
        sequence = torch.flip(sequence, [1])
    return sequence
<|reserved_special_token_0|>
class CRISPRDataset(torch.utils.data.Dataset):
    """Dataset over [[guide_tensor, target_tensor], label_tensor] items.

    __getitem__ squeezes each tensor and re-adds a leading channel dim,
    returning a dict with 'target', 'guide' and 'cfd' keys.
    """

    def __init__(self, thisdata):
        self.thisdata = thisdata

    def __len__(self):
        return len(self.thisdata)

    def __getitem__(self, idx):
        pair, label = self.thisdata[idx][0], self.thisdata[idx][1]
        guide, target = pair[0], pair[1]
        return {
            'target': torch.squeeze(target).unsqueeze_(dim=0),
            'guide': torch.squeeze(guide).unsqueeze_(dim=0),
            'cfd': torch.squeeze(label).unsqueeze_(dim=0),
        }
def collate_fn(batch):
    """Collate a list of sample dicts into stacked tensors per key.

    Samples where any of 'cfd', 'target' or 'guide' has an all-zero shape
    are dropped (with a diagnostic print). Keys with no surviving samples
    map to an empty tensor.
    """
    kept = {key: [] for key in batch[0].keys()}
    for sample in batch:
        cfd_ok = sum(list(sample['cfd'].shape)) > 0
        target_ok = sum(list(sample['target'].shape)) > 0
        guide_ok = sum(list(sample['guide'].shape)) > 0
        if cfd_ok and target_ok and guide_ok:
            for key in sample.keys():
                kept[key].append(sample[key])
        else:
            print('1', sum(list(sample['cfd'].shape)), sample['cfd'])
            print('2', sum(list(sample['target'].shape)), len(sample[
                'target'].shape), sample['target'].tolist())
            print('3', sum(list(sample['guide'].shape)), len(sample[
                'guide'].shape))
    output = {}
    for key in kept.keys():
        if len(kept[key]) > 0:
            output[key] = torch.stack(kept[key])
        else:
            output[key] = torch.tensor([])
    return output
<|reserved_special_token_0|>
def rankDataLoader(file='crisprsql.csv', batch=64, mode='target'):
    """Load CRISPR guide/target pairs labelled by rank-normalized cleavage_freq.

    Splits the CSV rows with byStudy/byGuide/byTarget according to `mode`,
    one-hot encodes guide and target sequences, and labels each pair with
    rank(cleavage_freq) / len(ranks) over all valid rows in the file.

    Returns [data, dl]:
      data -- three lists (train/val/test) of manually pre-batched
              [[guides, targets], labels] groups,
      dl   -- three DataLoaders over the same splits (only train shuffled).
    """
    ftime = time.monotonic()
    with open(file) as f:
        d = list(csv.DictReader(f))
    if mode == 'study':
        loadData = byStudy(d)
    elif mode == 'guide':
        loadData = byGuide(d)
    else:
        loadData = byTarget(d)
    data = list()
    dl = list()
    train = True
    ranks = list()
    # Collect every valid cleavage frequency so each label is a rank within
    # the whole file, not just within its own split.
    for line in d:
        if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
            ranks.append(float(line['cleavage_freq']))
    ranks.sort()
    for t in range(3):
        df = pd.DataFrame(loadData[t])
        # NOTE(review): the pd.to_numeric result is discarded, so dropna
        # below only removes genuine NaNs, not unparseable strings.
        pd.to_numeric(df.cleavage_freq, errors='coerce')
        df.dropna(subset=['cleavage_freq'], inplace=True)
        print(df.head())
        average_value = list()
        thisdata = list()
        for line in df.to_dict('records'):
            if line['cleavage_freq'] != '' and float(line['cleavage_freq']
                ) >= 0:
                # ranks.index(...) is an O(n) scan per row over the sorted list.
                thisdata.append([[one_hot(line['grna_target_sequence'],
                    line['grna_target_strand']), one_hot(line[
                    'target_sequence'], line['target_strand'])], torch.
                    tensor(ranks.index(float(line['cleavage_freq'])) / len(
                    ranks))])
                average_value.append(float(line['cleavage_freq']))
        if train == True:
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
                batch, True, collate_fn=collate_fn, num_workers=1 if torch.
                cuda.is_available() else 0))
            train = False
        else:
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
                batch, False, collate_fn=collate_fn, num_workers=1 if torch
                .cuda.is_available() else 0))
        thisdata1 = list()
        # Manual pre-batching: stack `batch` examples at a time; the final
        # partial batch is dropped.
        for i in range(int(len(thisdata) / batch)):
            ones = None
            twos = None
            threes = None
            for j in range(batch):
                if ones == None:
                    ones = thisdata[i * batch + j][0][0].unsqueeze_(0
                        ).unsqueeze_(0)
                    twos = thisdata[i * batch + j][0][1].unsqueeze_(0
                        ).unsqueeze_(0)
                    threes = thisdata[i * batch + j][1].unsqueeze_(0)
                else:
                    ones = torch.cat((ones, thisdata[i * batch + j][0][0].
                        unsqueeze_(0).unsqueeze_(0)), dim=0)
                    twos = torch.cat((twos, thisdata[i * batch + j][0][1].
                        unsqueeze_(0).unsqueeze_(0)), dim=0)
                    threes = torch.cat((threes, thisdata[i * batch + j][1].
                        unsqueeze_(0)), dim=0)
            thisdata1.append([[ones, twos], threes])
        data.append(thisdata1)
    print('time to load data: ', time.monotonic() - ftime, 'seconds')
    return [data, dl]
def fullDataLoader(file='augmentcrisprsql.csv', batch=64, mode='target',
    target='rank'):
    """Load guide/target pairs with a configurable label column.

    `target` selects the label: 'regular' -> raw cleavage_freq,
    'rank' -> ranked_cleavage_freq (single-element list), anything else ->
    two-class one-hot from threshhold_cleavage_freq (note: 'threshhold' is
    the actual CSV column spelling). Rows whose encodings or label have an
    all-zero shape are counted and skipped. Returns [data, dl] like
    rankDataLoader.
    """
    ftime = time.monotonic()
    with open(file) as f:
        d = list(csv.DictReader(f))
    random.shuffle(d)
    if mode == 'study':
        loadData = byStudy(d)
    elif mode == 'guide':
        loadData = byGuide(d)
    else:
        loadData = byTarget(d)
    data = list()
    dl = list()
    train = True
    for t in range(3):
        average_value = list()
        thisdata = list()
        # q counts rows rejected by the shape sanity check below.
        q = 0
        for line in loadData[t]:
            if line['cleavage_freq'] != '' and float(line['cleavage_freq']
                ) >= 0:
                if target == 'regular':
                    label = float(line['cleavage_freq'])
                elif target == 'rank':
                    label = [float(line['ranked_cleavage_freq'])]
                else:
                    label = [0, 1] if float(line['threshhold_cleavage_freq']
                        ) == 0 else [1, 0]
                if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(
                    one_hot(line['grna_target_sequence'], line[
                    'grna_target_strand']).shape)) > 0 and sum(list(one_hot
                    (line['target_sequence'], line['target_strand']).shape)
                    ) > 0:
                    thisdata.append([[one_hot(line['grna_target_sequence'],
                        line['grna_target_strand']), one_hot(line[
                        'target_sequence'], line['target_strand'])], torch.
                        tensor(label)])
                    average_value.append(label)
                else:
                    q += 1
                    print(sum(list(torch.tensor([label]).shape)), sum(list(
                        one_hot(line['grna_target_sequence'], line[
                        'grna_target_strand']).shape)), sum(list(one_hot(
                        line['target_sequence'], line['target_strand']).shape))
                        )
        print(q)
        if train == True:
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
                batch, True, collate_fn=collate_fn, num_workers=4))
            train = False
        else:
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
                batch, False, collate_fn=collate_fn, num_workers=4))
        thisdata1 = list()
        # Manual pre-batching; the final partial batch is dropped.
        for i in range(int(len(thisdata) / batch)):
            ones = None
            twos = None
            threes = None
            for j in range(batch):
                if ones == None:
                    ones = thisdata[i * batch + j][0][0].unsqueeze_(0
                        ).unsqueeze_(0)
                    twos = thisdata[i * batch + j][0][1].unsqueeze_(0
                        ).unsqueeze_(0)
                    threes = thisdata[i * batch + j][1].unsqueeze_(0)
                else:
                    ones = torch.cat((ones, thisdata[i * batch + j][0][0].
                        unsqueeze_(0).unsqueeze_(0)), dim=0)
                    twos = torch.cat((twos, thisdata[i * batch + j][0][1].
                        unsqueeze_(0).unsqueeze_(0)), dim=0)
                    threes = torch.cat((threes, thisdata[i * batch + j][1].
                        unsqueeze_(0)), dim=0)
            thisdata1.append([[ones, twos], threes])
        data.append(thisdata1)
    print('time to load data: ', time.monotonic() - ftime, 'seconds')
    return [data, dl]
<|reserved_special_token_0|>
def roc(labels, outputs):
    """Binarize labels and outputs at a cutoff and score their agreement.

    For each cutoff (currently only 0.01), values <= cutoff become class 0
    and the rest class 1; the result maps a key to
    [ROC-AUC, PR-AUC, plain agreement fraction].
    """
    flat_labels = labels.flatten().tolist()
    flat_outputs = outputs.flatten().tolist()
    average_values = dict()
    for i in range(1, 2):
        cutoff = 0.01 / i
        bin_labels = [(0 if value <= cutoff else 1) for value in flat_labels]
        bin_outputs = [(0 if value <= cutoff else 1) for value in flat_outputs]
        matches = sum(1 for a, b in zip(bin_labels, bin_outputs) if a == b)
        lr_precision, lr_recall, _ = precision_recall_curve(bin_labels,
            bin_outputs)
        # NOTE(review): the dict key uses 0.1/i while the binarization cutoff
        # is 0.01/i -- kept as-is, but worth confirming.
        average_values[0.1 / i] = [roc_auc_score(bin_labels, bin_outputs),
            auc(lr_recall, lr_precision), matches / len(bin_labels)]
    return average_values
def accuracy(labels, outputs, percent=0.1):
    """Fraction of outputs within +/-`percent` (relative) of their label.

    Both arguments must support .flatten().tolist() (e.g. torch tensors).
    Raises ZeroDivisionError on empty input, like the original.
    """
    flat_labels = labels.flatten().tolist()
    flat_outputs = outputs.flatten().tolist()
    hits = 0
    for idx, label in enumerate(flat_labels):
        lower = label * (1 - percent)
        upper = label * (1 + percent)
        if lower <= flat_outputs[idx] <= upper:
            hits += 1
    return hits / len(flat_labels)
def percentError(outputs, labels):
    """Mean element-wise relative error |labels - outputs| / labels."""
    relative = torch.abs(labels - outputs) / labels
    return torch.mean(relative)
def Test(net, dataset, device, crit, logpath=None):
    """Evaluate `net` over `dataset`; report argmax accuracy and mean loss.

    `dataset` yields (inputs, labels) batches; labels are moved to `device`.
    If `logpath` is given the same summary lines are written there
    (overwriting the file). Returns accuracy as a percentage.
    """
    net.eval()
    correct = 0
    total = 0
    totalloss = 0
    loss = 0
    with torch.no_grad():
        for _, batch in enumerate(dataset, 0):
            inputs, labels = batch[0], batch[1].to(device)
            outputs = net(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            totalloss += 1  # batch count, used to average the loss
            correct += (predicted == labels).sum().item()
            loss += crit(outputs, labels)
    summary = [
        'Accuracy of the network on the 10000 test images: %d %%' % (100 *
        correct / total),
        f'total: {total} correct: {correct}',
        f'loss: {loss / totalloss}',
    ]
    if logpath is not None:
        with open(logpath, 'w') as log:
            for line in summary:
                log.write(line)
    for line in summary:
        print(line)
    return 100 * correct / total
def getAllStudy():
    """Print, per study in crisprsql.csv, its set of distinct guide sequences.

    Rows whose grna_target_sequence is a lone base letter ('C','G','A','T')
    are treated as junk and skipped.
    """
    with open('crisprsql.csv') as f:
        data = csv.DictReader(f)
        alls = dict()
        for row in data:
            if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:
                try:
                    alls[row['study_name']].add(row['grna_target_sequence'])
                except KeyError:
                    # BUG FIX: set(seq) built a set of the sequence's
                    # *characters*; use a one-element set of the sequence.
                    alls[row['study_name']] = {row['grna_target_sequence']}
        for r in alls:
            print(r)
            print(alls[r])
            print(len(alls[r]))
def getallGuide():
    """Print, per guide sequence in crisprsql.csv, its distinct target set.

    Rows whose grna_target_sequence is a lone base letter ('C','G','A','T')
    are treated as junk and skipped.
    """
    with open('crisprsql.csv') as f:
        data = csv.DictReader(f)
        alls = dict()
        for row in data:
            if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:
                try:
                    alls[row['grna_target_sequence']].add(row[
                        'target_sequence'])
                except KeyError:
                    # BUG FIX: set(seq) built a set of the sequence's
                    # *characters*; use a one-element set of the sequence.
                    alls[row['grna_target_sequence']] = {row[
                        'target_sequence']}
        for r in alls:
            print(r)
            print(alls[r])
            print(len(alls[r]))
def aboveandbelow(threshold):
    """Print the fractions of crisprsql.csv rows with cleavage_freq above
    and below `threshold`, skipping junk single-base guide rows and rows
    with an empty cleavage_freq.
    """
    with open('crisprsql.csv') as f:
        data = csv.DictReader(f)
        above = 0
        total = 0
        for row in data:
            if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T'] and row[
                'cleavage_freq'] != '':
                if float(row['cleavage_freq']) > threshold:
                    above += 1
                total += 1
    # Removed the unused `alls` dict; added the missing '%' after the Below
    # figure so both fractions are labelled consistently.
    print(f'Above: {above / total}%. Below: {(total - above) / total}%')
def NewTrain(epochs, optim, crit, batch_per, train_data, val_data, net,
    device, optim_time=None, logpath=None):
    """Training loop with validation, wandb logging and checkpointing.

    Args:
      epochs: unused; epoch counts come from `optim_time`.
      optim: initial optimizer (immediately replaced per `optim_time` phase).
      crit: loss criterion.
      batch_per: number of batches between mid-epoch metric logs.
      train_data / val_data: lists of (inputs, labels) batches.
      net, device: model and target device.
      optim_time: list of (num_epochs, optimizer) phases, run in order.
      logpath: optional text log file (opened for the whole run).
    """
    net.to(device)
    criterion = crit
    optimizer = optim
    # Pre-concatenate all training labels once for epoch-level metrics.
    full_full_labels = None
    for i, data in enumerate(train_data, 0):
        if full_full_labels == None:
            full_full_labels = data[1].to(device)
        else:
            full_full_labels = torch.cat((full_full_labels, data[1].to(
                device)), 0)
    full_val_labels = None
    for i, data in enumerate(val_data, 0):
        if full_val_labels == None:
            full_val_labels = data[1].to(device)
        else:
            full_val_labels = torch.cat((full_val_labels, data[1].to(device
                )), 0)
    print('begin training')
    if logpath != None:
        f = open(logpath, 'w')
    best = 15
    bestval = 15
    bestepoch = 0
    # e is the global epoch counter across all optimizer phases.
    e = 0
    times = list()
    for q in optim_time:
        optimizer = q[1]
        print(q[0])
        for epoch in range(q[0]):
            ftime = time.monotonic()
            random.shuffle(train_data)
            correct = 0
            total = 0
            running_loss = 0.0
            net.train()
            full_output = None
            full_labels = None
            full_full_output = None
            for i, data in enumerate(train_data, 0):
                inputs, labels = data[0], data[1].to(device)
                optimizer.zero_grad()
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
                running_loss += loss.item()
                if full_output == None:
                    full_output = outputs
                else:
                    full_output = torch.cat((full_output, outputs), 0)
                if full_labels == None:
                    full_labels = labels
                else:
                    full_labels = torch.cat((full_labels, labels), 0)
                w = {'loss': loss.item(), 'accuracy': accuracy(labels,
                    outputs), 'percent error': percentError(outputs, labels)}
                wandb.log(w)
                # Every batch_per batches, log mid-epoch aggregates and fold
                # the accumulated outputs into the epoch-level buffer.
                if i % batch_per == batch_per - 1:
                    print('[%d, %5d] loss: %.3f' % (e + 1, i + 1,
                        running_loss / batch_per))
                    wl = roc(full_labels, full_output)
                    wandlog = {}
                    for q in wl:
                        wandlog[f'midepoch ROC_AUC'] = wl[q][0]
                        wandlog[f'midepoch PR_AUC'] = wl[q][1]
                        wandlog[f'midepoch threshhold accuracy'] = wl[q][2]
                    w.update({'midepoch loss': loss.item(),
                        'midepoch accuracy': accuracy(labels, outputs),
                        'midepoch percent error': percentError(outputs,
                        labels)})
                    wandb.log(w)
                    wandb.log(wandlog)
                    if full_full_output == None:
                        full_full_output = full_output
                    else:
                        full_full_output = torch.cat((full_full_output,
                            full_output), 0)
                    full_output = None
                    full_labels = None
                    running_loss = 0
                    correct = 0
                    total = 0
            if full_full_output == None:
                full_full_output = full_output
            else:
                full_full_output = torch.cat((full_full_output, full_output), 0
                    )
            wl = roc(full_full_labels, full_full_output)
            w = {}
            for q in wl:
                w[f'epoch ROC_AUC'] = wl[q][0]
                w[f'epoch PR_AUC'] = wl[q][1]
                w[f'epoch threshhold accuracy'] = wl[q][2]
            w.update({'epoch loss': loss.item(), 'epoch accuracy': accuracy
                (full_full_labels, full_full_output), 'epoch percent error':
                percentError(full_full_output, full_full_labels), 'label':
                labels.flatten()[0], 'output': outputs.flatten()[0]})
            wandb.log(w)
            # Snapshot weights whenever a metric saturates at 1.
            if w['epoch accuracy'] == 1:
                PATH = f'.accuracynet.pth'
                torch.save(net.state_dict(), PATH)
            if w['epoch PR_AUC'] == 1:
                PATH = f'.PRnet.pth'
                torch.save(net.state_dict(), PATH)
            if w['epoch ROC_AUC'] == 1:
                PATH = f'.ROCnet.pth'
                torch.save(net.state_dict(), PATH)
            full_output = None
            full_full_output = None
            running_loss = 0
            correct = 0
            total = 0
            running_loss = 0
            net.eval()
            correct = 0
            total = 0
            if e % 10 == 9:
                PATH = f'.net.pth'
                torch.save(net.state_dict(), PATH)
            # Validation pass. NOTE(review): loss.backward() here accumulates
            # gradients during eval -- confirm that is intended.
            for i, data in enumerate(val_data, 0):
                inputs, labels = data[0], data[1].to(device)
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                running_loss += loss.item()
                total += labels.size(0)
                if full_output == None:
                    full_output = outputs
                else:
                    full_output = torch.cat((full_output, outputs), 0)
            print(
                f'Validation loss for Epoch [{e + 1}]: {running_loss / total}')
            wandlog = {}
            # Track the best (lowest) validation loss and its epoch.
            if bestval <= running_loss / total:
                e = e
            else:
                bestepoch = e
                bestval = running_loss / total
            running_loss = 0
            correct = 0
            total = 0
            times.append(time.monotonic() - ftime)
            PATH = f'.net.pth'
            torch.save(net.state_dict(), PATH)
            print('time for epoch: ', times[-1], 'seconds')
            if logpath != None:
                f.write(f'time for epoch: {times[-1]}, seconds')
            e += 1
    print('Finished Training')
    print('average time per epoch: ', sum(times) / len(times), 'seconds')
    if logpath != None:
        f.write('Finished Training')
        f.write(f'average time per epoch: {sum(times) / len(times)} seconds')
        f.close()
    return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def byGuide(data, val=None, test=None):
    """Split rows into train/val/test by their guide (grna_target_sequence).

    `val`/`test` may supply explicit guide lists; otherwise hard-coded
    defaults are used. Every row gets an 'off' one-hot tensor added.
    Returns [train_set, val_set, test_set].
    """
    default_val = ['GGGTGGGGGGAGTTTGCTCCTGG', 'GACCCCCTCCACCCCGCCTCCGG',
        'GGCCTCCCCAAAGCCTGGCCAGG', 'GAACACAAAGCATAGACTGCGGG']
    default_test = ['GCAAAACTCAACCCTACCCCAGG', 'GGCCCAGACTGAGCACGTGATGG',
        'GGGAAAGACCCAGCATCCGTGGG', 'GGAATCCCTTCTGCAGCACCTGG',
        'GTGAGTGAGTGTGTGCGTGTGGG', 'GATGATGATGCCCCGGGCGTTGG',
        'GCCGGAGGGGTTTGCACAGAAGG']
    val_guides = default_val if val is None else val
    test_guides = default_test if test is None else test
    train_set = []
    val_set = []
    test_set = []
    for pair in data:
        pair['off'] = torch.tensor([1.0, 0.0])
        guide = pair['grna_target_sequence']
        if guide in val_guides:
            val_set.append(pair)
        elif guide in test_guides:
            test_set.append(pair)
        else:
            train_set.append(pair)
    return [train_set, val_set, test_set]
def byTarget(data, train=0.7, val=0.1, test=0.2):
    """Shuffle `data` in place and split it positionally into train/val/test.

    Indices <= len*train go to train (boundary inclusive), the next slice
    up to len*(train+val) to val, the rest to test. The `test` argument is
    unused; the test split is simply the remainder.
    """
    random.shuffle(data)
    train_set, val_set, test_set = [], [], []
    n = len(data)
    for idx, row in enumerate(data):
        if idx <= n * train:
            train_set.append(row)
        elif idx <= n * (train + val):
            val_set.append(row)
        else:
            test_set.append(row)
    return [train_set, val_set, test_set]
def byStudy(data, val=None, test=None):
    """Split rows into train/val/test by their study_name.

    Defaults: val = Anderson/Ran, test = Kim/Tsai/Cho. Each row gets an
    'off' one-hot tensor added. Returns [train_set, val_set, test_set].
    """
    val_studies = ['Anderson', 'Ran'] if val is None else val
    test_studies = ['Kim', 'Tsai', 'Cho'] if test is None else test
    train_set, val_set, test_set = [], [], []
    for pair in data:
        pair['off'] = torch.tensor([1.0, 0.0])
        study = pair['study_name']
        if study in val_studies:
            val_set.append(pair)
        elif study in test_studies:
            test_set.append(pair)
        else:
            train_set.append(pair)
    return [train_set, val_set, test_set]
def one_hot(data, sign='+'):
    """One-hot encode a nucleotide string into a (23, 4) tensor.

    Column order is A, C, G, T; unrecognized characters become all-zero
    rows. The result is zero-padded or truncated to exactly 23 rows.
    For the '-' strand the tensor is flipped along dim 1 (the channel
    axis) -- NOTE(review): flipping channels rather than positions is kept
    as in the original; worth confirming against the data pipeline.
    """
    column = {'a': 0, 'c': 1, 'g': 2, 't': 3}
    rows = []
    for ch in data.lower():
        row = torch.zeros((1, 4))
        if ch in column:
            row[0][column[ch]] = 1
        rows.append(row)
    sequence = torch.cat(rows, dim=0)
    length = list(sequence.size())[0]
    if length < 23:
        sequence = torch.cat((sequence, torch.zeros((23 - length, 4))), dim=0)
    if list(sequence.size())[0] > 23:
        sequence = sequence[:23]
    if sign == '-':
        sequence = torch.flip(sequence, [1])
    return sequence
def dataLoader(file='crisprsql.csv', batch=64, mode='target'):
    """Load guide/target pairs labelled with the raw cleavage_freq value.

    Splits rows by `mode` ('study'/'guide'/'target'), one-hot encodes the
    sequences, and returns [data, dl] where `dl` holds three DataLoaders
    (train shuffled, val/test not) and `data` holds manually pre-batched
    [[guides, targets], labels] groups for the same three splits.
    """
    ftime = time.monotonic()
    with open(file) as f:
        d = list(csv.DictReader(f))
    if mode == 'study':
        loadData = byStudy(d)
    elif mode == 'guide':
        loadData = byGuide(d)
    else:
        loadData = byTarget(d)
    data = list()
    dl = list()
    train = True
    for t in range(3):
        average_value = list()
        thisdata = list()
        for line in loadData[t]:
            if line['cleavage_freq'] != '' and float(line['cleavage_freq']
                ) >= 0:
                thisdata.append([[one_hot(line['grna_target_sequence'],
                    line['grna_target_strand']), one_hot(line[
                    'target_sequence'], line['target_strand'])], torch.
                    tensor([float(line['cleavage_freq'])])])
                average_value.append(float(line['cleavage_freq']))
        if train == True:
            # NOTE(review): both branches of this conditional yield 4 workers;
            # presumably one side was meant to be 0 on CPU-only machines.
            dl.append(torch.utils.data.DataLoader(thisdata, batch, True,
                num_workers=4 if torch.cuda.is_available() else 4))
            print(thisdata[0][0][0].size())
            train = False
        else:
            dl.append(torch.utils.data.DataLoader(thisdata, batch, False,
                num_workers=4 if torch.cuda.is_available() else 4))
        thisdata1 = list()
        # Manual pre-batching; the final partial batch is dropped.
        for i in range(int(len(thisdata) / batch)):
            ones = None
            twos = None
            threes = None
            for j in range(batch):
                if ones == None:
                    ones = thisdata[i * batch + j][0][0].unsqueeze_(0
                        ).unsqueeze_(0)
                    twos = thisdata[i * batch + j][0][1].unsqueeze_(0
                        ).unsqueeze_(0)
                    threes = thisdata[i * batch + j][1].unsqueeze_(0)
                else:
                    ones = torch.cat((ones, thisdata[i * batch + j][0][0].
                        unsqueeze_(0).unsqueeze_(0)), dim=0)
                    twos = torch.cat((twos, thisdata[i * batch + j][0][1].
                        unsqueeze_(0).unsqueeze_(0)), dim=0)
                    threes = torch.cat((threes, thisdata[i * batch + j][1].
                        unsqueeze_(0)), dim=0)
            thisdata1.append([[ones, twos], threes])
        data.append(thisdata1)
    print('time to load data: ', time.monotonic() - ftime, 'seconds')
    return [data, dl]
class CRISPRDataset(torch.utils.data.Dataset):
    """Dataset over [[guide_tensor, target_tensor], label_tensor] items.

    __getitem__ squeezes each tensor and re-adds a leading channel dim,
    returning a dict with 'target', 'guide' and 'cfd' keys.
    """

    def __init__(self, thisdata):
        self.thisdata = thisdata

    def __len__(self):
        return len(self.thisdata)

    def __getitem__(self, idx):
        pair, label = self.thisdata[idx][0], self.thisdata[idx][1]
        guide, target = pair[0], pair[1]
        return {
            'target': torch.squeeze(target).unsqueeze_(dim=0),
            'guide': torch.squeeze(guide).unsqueeze_(dim=0),
            'cfd': torch.squeeze(label).unsqueeze_(dim=0),
        }
def collate_fn(batch):
    """Collate a list of sample dicts into stacked tensors per key.

    Samples where any of 'cfd', 'target' or 'guide' has an all-zero shape
    are dropped (with a diagnostic print). Keys with no surviving samples
    map to an empty tensor.
    """
    kept = {key: [] for key in batch[0].keys()}
    for sample in batch:
        cfd_ok = sum(list(sample['cfd'].shape)) > 0
        target_ok = sum(list(sample['target'].shape)) > 0
        guide_ok = sum(list(sample['guide'].shape)) > 0
        if cfd_ok and target_ok and guide_ok:
            for key in sample.keys():
                kept[key].append(sample[key])
        else:
            print('1', sum(list(sample['cfd'].shape)), sample['cfd'])
            print('2', sum(list(sample['target'].shape)), len(sample[
                'target'].shape), sample['target'].tolist())
            print('3', sum(list(sample['guide'].shape)), len(sample[
                'guide'].shape))
    output = {}
    for key in kept.keys():
        if len(kept[key]) > 0:
            output[key] = torch.stack(kept[key])
        else:
            output[key] = torch.tensor([])
    return output
<|reserved_special_token_0|>
def rankDataLoader(file='crisprsql.csv', batch=64, mode='target'):
    """Load CRISPR guide/target pairs labelled by rank-normalized cleavage_freq.

    Splits the CSV rows with byStudy/byGuide/byTarget according to `mode`,
    one-hot encodes guide and target sequences, and labels each pair with
    rank(cleavage_freq) / len(ranks) over all valid rows in the file.

    Returns [data, dl]:
      data -- three lists (train/val/test) of manually pre-batched
              [[guides, targets], labels] groups,
      dl   -- three DataLoaders over the same splits (only train shuffled).
    """
    ftime = time.monotonic()
    with open(file) as f:
        d = list(csv.DictReader(f))
    if mode == 'study':
        loadData = byStudy(d)
    elif mode == 'guide':
        loadData = byGuide(d)
    else:
        loadData = byTarget(d)
    data = list()
    dl = list()
    train = True
    ranks = list()
    # Collect every valid cleavage frequency so each label is a rank within
    # the whole file, not just within its own split.
    for line in d:
        if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
            ranks.append(float(line['cleavage_freq']))
    ranks.sort()
    for t in range(3):
        df = pd.DataFrame(loadData[t])
        # NOTE(review): the pd.to_numeric result is discarded, so dropna
        # below only removes genuine NaNs, not unparseable strings.
        pd.to_numeric(df.cleavage_freq, errors='coerce')
        df.dropna(subset=['cleavage_freq'], inplace=True)
        print(df.head())
        average_value = list()
        thisdata = list()
        for line in df.to_dict('records'):
            if line['cleavage_freq'] != '' and float(line['cleavage_freq']
                ) >= 0:
                # ranks.index(...) is an O(n) scan per row over the sorted list.
                thisdata.append([[one_hot(line['grna_target_sequence'],
                    line['grna_target_strand']), one_hot(line[
                    'target_sequence'], line['target_strand'])], torch.
                    tensor(ranks.index(float(line['cleavage_freq'])) / len(
                    ranks))])
                average_value.append(float(line['cleavage_freq']))
        if train == True:
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
                batch, True, collate_fn=collate_fn, num_workers=1 if torch.
                cuda.is_available() else 0))
            train = False
        else:
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
                batch, False, collate_fn=collate_fn, num_workers=1 if torch
                .cuda.is_available() else 0))
        thisdata1 = list()
        # Manual pre-batching: stack `batch` examples at a time; the final
        # partial batch is dropped.
        for i in range(int(len(thisdata) / batch)):
            ones = None
            twos = None
            threes = None
            for j in range(batch):
                if ones == None:
                    ones = thisdata[i * batch + j][0][0].unsqueeze_(0
                        ).unsqueeze_(0)
                    twos = thisdata[i * batch + j][0][1].unsqueeze_(0
                        ).unsqueeze_(0)
                    threes = thisdata[i * batch + j][1].unsqueeze_(0)
                else:
                    ones = torch.cat((ones, thisdata[i * batch + j][0][0].
                        unsqueeze_(0).unsqueeze_(0)), dim=0)
                    twos = torch.cat((twos, thisdata[i * batch + j][0][1].
                        unsqueeze_(0).unsqueeze_(0)), dim=0)
                    threes = torch.cat((threes, thisdata[i * batch + j][1].
                        unsqueeze_(0)), dim=0)
            thisdata1.append([[ones, twos], threes])
        data.append(thisdata1)
    print('time to load data: ', time.monotonic() - ftime, 'seconds')
    return [data, dl]
def fullDataLoader(file='augmentcrisprsql.csv', batch=64, mode='target',
    target='rank'):
    """Load guide/target pairs with a configurable label column.

    `target` selects the label: 'regular' -> raw cleavage_freq,
    'rank' -> ranked_cleavage_freq (single-element list), anything else ->
    two-class one-hot from threshhold_cleavage_freq (note: 'threshhold' is
    the actual CSV column spelling). Rows whose encodings or label have an
    all-zero shape are counted and skipped. Returns [data, dl] like
    rankDataLoader.
    """
    ftime = time.monotonic()
    with open(file) as f:
        d = list(csv.DictReader(f))
    random.shuffle(d)
    if mode == 'study':
        loadData = byStudy(d)
    elif mode == 'guide':
        loadData = byGuide(d)
    else:
        loadData = byTarget(d)
    data = list()
    dl = list()
    train = True
    for t in range(3):
        average_value = list()
        thisdata = list()
        # q counts rows rejected by the shape sanity check below.
        q = 0
        for line in loadData[t]:
            if line['cleavage_freq'] != '' and float(line['cleavage_freq']
                ) >= 0:
                if target == 'regular':
                    label = float(line['cleavage_freq'])
                elif target == 'rank':
                    label = [float(line['ranked_cleavage_freq'])]
                else:
                    label = [0, 1] if float(line['threshhold_cleavage_freq']
                        ) == 0 else [1, 0]
                if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(
                    one_hot(line['grna_target_sequence'], line[
                    'grna_target_strand']).shape)) > 0 and sum(list(one_hot
                    (line['target_sequence'], line['target_strand']).shape)
                    ) > 0:
                    thisdata.append([[one_hot(line['grna_target_sequence'],
                        line['grna_target_strand']), one_hot(line[
                        'target_sequence'], line['target_strand'])], torch.
                        tensor(label)])
                    average_value.append(label)
                else:
                    q += 1
                    print(sum(list(torch.tensor([label]).shape)), sum(list(
                        one_hot(line['grna_target_sequence'], line[
                        'grna_target_strand']).shape)), sum(list(one_hot(
                        line['target_sequence'], line['target_strand']).shape))
                        )
        print(q)
        if train == True:
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
                batch, True, collate_fn=collate_fn, num_workers=4))
            train = False
        else:
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),
                batch, False, collate_fn=collate_fn, num_workers=4))
        thisdata1 = list()
        # Manual pre-batching; the final partial batch is dropped.
        for i in range(int(len(thisdata) / batch)):
            ones = None
            twos = None
            threes = None
            for j in range(batch):
                if ones == None:
                    ones = thisdata[i * batch + j][0][0].unsqueeze_(0
                        ).unsqueeze_(0)
                    twos = thisdata[i * batch + j][0][1].unsqueeze_(0
                        ).unsqueeze_(0)
                    threes = thisdata[i * batch + j][1].unsqueeze_(0)
                else:
                    ones = torch.cat((ones, thisdata[i * batch + j][0][0].
                        unsqueeze_(0).unsqueeze_(0)), dim=0)
                    twos = torch.cat((twos, thisdata[i * batch + j][0][1].
                        unsqueeze_(0).unsqueeze_(0)), dim=0)
                    threes = torch.cat((threes, thisdata[i * batch + j][1].
                        unsqueeze_(0)), dim=0)
            thisdata1.append([[ones, twos], threes])
        data.append(thisdata1)
    print('time to load data: ', time.monotonic() - ftime, 'seconds')
    return [data, dl]
<|reserved_special_token_0|>
def roc(labels, outputs):
    """Binarize labels and outputs at a cutoff and score their agreement.

    For each cutoff (currently only 0.01), values <= cutoff become class 0
    and the rest class 1; the result maps a key to
    [ROC-AUC, PR-AUC, plain agreement fraction].
    """
    flat_labels = labels.flatten().tolist()
    flat_outputs = outputs.flatten().tolist()
    average_values = dict()
    for i in range(1, 2):
        cutoff = 0.01 / i
        bin_labels = [(0 if value <= cutoff else 1) for value in flat_labels]
        bin_outputs = [(0 if value <= cutoff else 1) for value in flat_outputs]
        matches = sum(1 for a, b in zip(bin_labels, bin_outputs) if a == b)
        lr_precision, lr_recall, _ = precision_recall_curve(bin_labels,
            bin_outputs)
        # NOTE(review): the dict key uses 0.1/i while the binarization cutoff
        # is 0.01/i -- kept as-is, but worth confirming.
        average_values[0.1 / i] = [roc_auc_score(bin_labels, bin_outputs),
            auc(lr_recall, lr_precision), matches / len(bin_labels)]
    return average_values
def accuracy(labels, outputs, percent=0.1):
    """Fraction of outputs within +/-`percent` (relative) of their label.

    Both arguments must support .flatten().tolist() (e.g. torch tensors).
    Raises ZeroDivisionError on empty input, like the original.
    """
    flat_labels = labels.flatten().tolist()
    flat_outputs = outputs.flatten().tolist()
    hits = 0
    for idx, label in enumerate(flat_labels):
        lower = label * (1 - percent)
        upper = label * (1 + percent)
        if lower <= flat_outputs[idx] <= upper:
            hits += 1
    return hits / len(flat_labels)
def percentError(outputs, labels):
    """Mean element-wise relative error |labels - outputs| / labels."""
    relative = torch.abs(labels - outputs) / labels
    return torch.mean(relative)
def Test(net, dataset, device, crit, logpath=None):
    """Evaluate `net` over `dataset`; report argmax accuracy and mean loss.

    `dataset` yields (inputs, labels) batches; labels are moved to `device`.
    If `logpath` is given the same summary lines are written there
    (overwriting the file). Returns accuracy as a percentage.
    """
    net.eval()
    correct = 0
    total = 0
    totalloss = 0
    loss = 0
    with torch.no_grad():
        for _, batch in enumerate(dataset, 0):
            inputs, labels = batch[0], batch[1].to(device)
            outputs = net(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            totalloss += 1  # batch count, used to average the loss
            correct += (predicted == labels).sum().item()
            loss += crit(outputs, labels)
    summary = [
        'Accuracy of the network on the 10000 test images: %d %%' % (100 *
        correct / total),
        f'total: {total} correct: {correct}',
        f'loss: {loss / totalloss}',
    ]
    if logpath is not None:
        with open(logpath, 'w') as log:
            for line in summary:
                log.write(line)
    for line in summary:
        print(line)
    return 100 * correct / total
def getAllStudy():
    """Print, per study in crisprsql.csv, its set of distinct guide sequences.

    Rows whose grna_target_sequence is a lone base letter ('C','G','A','T')
    are treated as junk and skipped.
    """
    with open('crisprsql.csv') as f:
        data = csv.DictReader(f)
        alls = dict()
        for row in data:
            if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:
                try:
                    alls[row['study_name']].add(row['grna_target_sequence'])
                except KeyError:
                    # BUG FIX: set(seq) built a set of the sequence's
                    # *characters*; use a one-element set of the sequence.
                    alls[row['study_name']] = {row['grna_target_sequence']}
        for r in alls:
            print(r)
            print(alls[r])
            print(len(alls[r]))
def getallGuide():
    """Print, per guide sequence in crisprsql.csv, its distinct target set.

    Rows whose grna_target_sequence is a lone base letter ('C','G','A','T')
    are treated as junk and skipped.
    """
    with open('crisprsql.csv') as f:
        data = csv.DictReader(f)
        alls = dict()
        for row in data:
            if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:
                try:
                    alls[row['grna_target_sequence']].add(row[
                        'target_sequence'])
                except KeyError:
                    # BUG FIX: set(seq) built a set of the sequence's
                    # *characters*; use a one-element set of the sequence.
                    alls[row['grna_target_sequence']] = {row[
                        'target_sequence']}
        for r in alls:
            print(r)
            print(alls[r])
            print(len(alls[r]))
def aboveandbelow(threshold):
    """Print the fractions of crisprsql.csv rows with cleavage_freq above
    and below `threshold`, skipping junk single-base guide rows and rows
    with an empty cleavage_freq.
    """
    with open('crisprsql.csv') as f:
        data = csv.DictReader(f)
        above = 0
        total = 0
        for row in data:
            if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T'] and row[
                'cleavage_freq'] != '':
                if float(row['cleavage_freq']) > threshold:
                    above += 1
                total += 1
    # Removed the unused `alls` dict; added the missing '%' after the Below
    # figure so both fractions are labelled consistently.
    print(f'Above: {above / total}%. Below: {(total - above) / total}%')
def NewTrain(epochs, optim, crit, batch_per, train_data, val_data, net,
    device, optim_time=None, logpath=None):
    """Training loop with validation, wandb logging and checkpointing.

    Args:
      epochs: unused; epoch counts come from `optim_time`.
      optim: initial optimizer (immediately replaced per `optim_time` phase).
      crit: loss criterion.
      batch_per: number of batches between mid-epoch metric logs.
      train_data / val_data: lists of (inputs, labels) batches.
      net, device: model and target device.
      optim_time: list of (num_epochs, optimizer) phases, run in order.
      logpath: optional text log file (opened for the whole run).
    """
    net.to(device)
    criterion = crit
    optimizer = optim
    # Pre-concatenate all training labels once for epoch-level metrics.
    full_full_labels = None
    for i, data in enumerate(train_data, 0):
        if full_full_labels == None:
            full_full_labels = data[1].to(device)
        else:
            full_full_labels = torch.cat((full_full_labels, data[1].to(
                device)), 0)
    full_val_labels = None
    for i, data in enumerate(val_data, 0):
        if full_val_labels == None:
            full_val_labels = data[1].to(device)
        else:
            full_val_labels = torch.cat((full_val_labels, data[1].to(device
                )), 0)
    print('begin training')
    if logpath != None:
        f = open(logpath, 'w')
    best = 15
    bestval = 15
    bestepoch = 0
    # e is the global epoch counter across all optimizer phases.
    e = 0
    times = list()
    for q in optim_time:
        optimizer = q[1]
        print(q[0])
        for epoch in range(q[0]):
            ftime = time.monotonic()
            random.shuffle(train_data)
            correct = 0
            total = 0
            running_loss = 0.0
            net.train()
            full_output = None
            full_labels = None
            full_full_output = None
            for i, data in enumerate(train_data, 0):
                inputs, labels = data[0], data[1].to(device)
                optimizer.zero_grad()
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
                running_loss += loss.item()
                if full_output == None:
                    full_output = outputs
                else:
                    full_output = torch.cat((full_output, outputs), 0)
                if full_labels == None:
                    full_labels = labels
                else:
                    full_labels = torch.cat((full_labels, labels), 0)
                w = {'loss': loss.item(), 'accuracy': accuracy(labels,
                    outputs), 'percent error': percentError(outputs, labels)}
                wandb.log(w)
                # Every batch_per batches, log mid-epoch aggregates and fold
                # the accumulated outputs into the epoch-level buffer.
                if i % batch_per == batch_per - 1:
                    print('[%d, %5d] loss: %.3f' % (e + 1, i + 1,
                        running_loss / batch_per))
                    wl = roc(full_labels, full_output)
                    wandlog = {}
                    for q in wl:
                        wandlog[f'midepoch ROC_AUC'] = wl[q][0]
                        wandlog[f'midepoch PR_AUC'] = wl[q][1]
                        wandlog[f'midepoch threshhold accuracy'] = wl[q][2]
                    w.update({'midepoch loss': loss.item(),
                        'midepoch accuracy': accuracy(labels, outputs),
                        'midepoch percent error': percentError(outputs,
                        labels)})
                    wandb.log(w)
                    wandb.log(wandlog)
                    if full_full_output == None:
                        full_full_output = full_output
                    else:
                        full_full_output = torch.cat((full_full_output,
                            full_output), 0)
                    full_output = None
                    full_labels = None
                    running_loss = 0
                    correct = 0
                    total = 0
            if full_full_output == None:
                full_full_output = full_output
            else:
                full_full_output = torch.cat((full_full_output, full_output), 0
                    )
            wl = roc(full_full_labels, full_full_output)
            w = {}
            for q in wl:
                w[f'epoch ROC_AUC'] = wl[q][0]
                w[f'epoch PR_AUC'] = wl[q][1]
                w[f'epoch threshhold accuracy'] = wl[q][2]
            w.update({'epoch loss': loss.item(), 'epoch accuracy': accuracy
                (full_full_labels, full_full_output), 'epoch percent error':
                percentError(full_full_output, full_full_labels), 'label':
                labels.flatten()[0], 'output': outputs.flatten()[0]})
            wandb.log(w)
            # Snapshot weights whenever a metric saturates at 1.
            if w['epoch accuracy'] == 1:
                PATH = f'.accuracynet.pth'
                torch.save(net.state_dict(), PATH)
            if w['epoch PR_AUC'] == 1:
                PATH = f'.PRnet.pth'
                torch.save(net.state_dict(), PATH)
            if w['epoch ROC_AUC'] == 1:
                PATH = f'.ROCnet.pth'
                torch.save(net.state_dict(), PATH)
            full_output = None
            full_full_output = None
            running_loss = 0
            correct = 0
            total = 0
            running_loss = 0
            net.eval()
            correct = 0
            total = 0
            if e % 10 == 9:
                PATH = f'.net.pth'
                torch.save(net.state_dict(), PATH)
            # Validation pass. NOTE(review): loss.backward() here accumulates
            # gradients during eval -- confirm that is intended.
            for i, data in enumerate(val_data, 0):
                inputs, labels = data[0], data[1].to(device)
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                running_loss += loss.item()
                total += labels.size(0)
                if full_output == None:
                    full_output = outputs
                else:
                    full_output = torch.cat((full_output, outputs), 0)
            print(
                f'Validation loss for Epoch [{e + 1}]: {running_loss / total}')
            wandlog = {}
            # Track the best (lowest) validation loss and its epoch.
            if bestval <= running_loss / total:
                e = e
            else:
                bestepoch = e
                bestval = running_loss / total
            running_loss = 0
            correct = 0
            total = 0
            times.append(time.monotonic() - ftime)
            PATH = f'.net.pth'
            torch.save(net.state_dict(), PATH)
            print('time for epoch: ', times[-1], 'seconds')
            if logpath != None:
                f.write(f'time for epoch: {times[-1]}, seconds')
            e += 1
    print('Finished Training')
    print('average time per epoch: ', sum(times) / len(times), 'seconds')
    if logpath != None:
        f.write('Finished Training')
        f.write(f'average time per epoch: {sum(times) / len(times)} seconds')
        f.close()
    return
def compute_dataframe(df: pd.DataFrame, checkpoint_path):
    """Run the model over every row of *df* and attach a 'pred' column.

    NOTE(review): despite the name, `checkpoint_path` is used directly as
    the model callable (see the commented-out loader above) — confirm
    callers pass a model object, not a filesystem path.
    """
    model = checkpoint_path
    # pull the four sequence/strand columns out as parallel lists
    targets, targets_s, guides, guides_s = df.target_sequence.tolist(
        ), df.target_strand.tolist(), df.grna_target_sequence.tolist(
        ), df.grna_target_strand.tolist()
    preds = []
    for guide, target, guide_s, target_s in zip(guides, targets, guides_s,
        targets_s):
        # model takes [guide_one_hot, target_one_hot]; .item() assumes a
        # single-element tensor output
        pred = model([one_hot(guide, guide_s), one_hot(target, target_s)])
        preds.append(pred.item())
    df['pred'] = preds
    return df
<|reserved_special_token_1|>
import random
import copy
random.seed(42)
import csv
import torch
import time
import statistics
import wandb
from model import Net, LinearRegression, LogisticRegression
def byGuide(data, val=None, test=None):
    """Split *data* into train/val/test sets by gRNA guide sequence.

    Args:
        data: iterable of dict rows, each with a 'grna_target_sequence' key.
        val, test: optional collections of guide sequences to hold out;
            when None, the project's default hold-out guides are used.

    Returns:
        [train_set, val_set, test_set]. Side effect: every row gets an
        'off' label tensor([1., 0.]) (kept for caller compatibility).
    """
    val_guides = val
    if val is None:  # was `== None`; `is None` is the idiomatic sentinel test
        val_guides = [
            "GGGTGGGGGGAGTTTGCTCCTGG",
            "GACCCCCTCCACCCCGCCTCCGG",
            "GGCCTCCCCAAAGCCTGGCCAGG",
            "GAACACAAAGCATAGACTGCGGG"
        ]
    test_guides = test
    if test is None:
        test_guides = [
            "GCAAAACTCAACCCTACCCCAGG",
            "GGCCCAGACTGAGCACGTGATGG",
            "GGGAAAGACCCAGCATCCGTGGG",
            "GGAATCCCTTCTGCAGCACCTGG",
            "GTGAGTGAGTGTGTGCGTGTGGG",
            "GATGATGATGCCCCGGGCGTTGG",
            "GCCGGAGGGGTTTGCACAGAAGG"
        ]
    # set membership is O(1) per row instead of scanning a list each time
    val_lookup = set(val_guides)
    test_lookup = set(test_guides)
    train_set = []
    val_set = []
    test_set = []
    for pair in data:
        pair['off'] = torch.tensor([1., 0.])
        if pair['grna_target_sequence'] in val_lookup:
            val_set.append(pair)
        elif pair['grna_target_sequence'] in test_lookup:
            test_set.append(pair)
        else:
            train_set.append(pair)
    return [train_set, val_set, test_set]
def byTarget(data, train=.7, val=.1, test=.2):
random.shuffle(data)
train_set = []
val_set = []
test_set = []
for i in range(len(data)):
if i <= len(data) * train:
train_set.append(data[i])
elif i <= len(data) * (train + val):
val_set.append(data[i])
else:
test_set.append(data[i])
return [train_set, val_set, test_set]
def byStudy(data, val=None, test=None):
    """Split *data* into [train, val, test] lists by the study that
    produced each row; every row also gets an 'off' label tensor."""
    val_studies = ['Anderson', 'Ran'] if val is None else val
    test_studies = ['Kim', 'Tsai', 'Cho'] if test is None else test
    train_set, val_set, test_set = [], [], []
    for pair in data:
        pair['off'] = torch.tensor([1., 0.])
        study = pair['study_name']
        if study in val_studies:
            val_set.append(pair)
        elif study in test_studies:
            test_set.append(pair)
        else:
            train_set.append(pair)
    return [train_set, val_set, test_set]
def one_hot(data, sign='+'):
    """One-hot encode a DNA sequence into a (23, 4) float tensor.

    Columns are a/c/g/t (case-insensitive); any other character yields an
    all-zero row. Sequences shorter than 23 are zero-padded at the end,
    longer ones truncated. For the '-' strand the tensor is flipped along
    the channel axis.
    """
    column = {'a': 0, 'c': 1, 'g': 2, 't': 3}
    rows = []
    for ch in data.lower():
        row = torch.zeros((1, 4))
        if ch in column:
            row[0][column[ch]] = 1
        rows.append(row)
    sequence = torch.cat(rows, dim=0)
    length = list(sequence.size())[0]
    if length < 23:
        # pad with zero rows up to the fixed 23-nt window
        sequence = torch.cat((sequence, torch.zeros((23 - length, 4))), dim=0)
    if list(sequence.size())[0] > 23:
        sequence = sequence[:23]
    if sign == '-':
        sequence = torch.flip(sequence, [1])
    return sequence
# import numpy as np
def dataLoader(file="crisprsql.csv", batch=64, mode="target"):
    """Read the CRISPR CSV, split it, and build loaders.

    Args:
        file: CSV path with cleavage_freq / sequence / strand columns.
        batch: batch size for both the DataLoaders and manual batching.
        mode: 'study' | 'guide' | anything else -> random by-target split.

    Returns:
        [data, dl] where dl holds three torch DataLoaders (train shuffled,
        val/test not) and data holds manually pre-batched
        [[guide_batch, target_batch], label_batch] groups for val and test
        only (the train split is not pre-batched).
    """
    ftime = time.monotonic()
    with open(file) as f:
        d = list(csv.DictReader(f))
    if mode == "study":
        loadData = byStudy(d)
    elif mode == "guide":
        loadData = byGuide(d)
    else:
        loadData = byTarget(d)
    data = list()
    dl = list()
    train = True
    for t in range(3):
        average_value = list()
        thisdata = list()
        for line in loadData[t]:
            # keep only rows with a usable, non-negative cleavage frequency
            if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
                thisdata.append([
                    [one_hot(line['grna_target_sequence'], line['grna_target_strand']),
                    one_hot(line['target_sequence'], line["target_strand"])],
                    torch.tensor([float(line['cleavage_freq'])])])
                average_value.append(float(line['cleavage_freq']))
        if train:
            # only the training split is shuffled; the original expression
            # `(4 if torch.cuda.is_available() else 4)` was 4 either way
            dl.append(torch.utils.data.DataLoader(thisdata, batch, True, num_workers=4))
            print(thisdata[0][0][0].size())
            train = False
        else:
            dl.append(torch.utils.data.DataLoader(thisdata, batch, False, num_workers=4))
            # manually pre-batch val/test into stacked tensors (trailing
            # partial batch is dropped, matching DataLoader semantics here)
            thisdata1 = list()
            for i in range(int(len(thisdata) / batch)):
                ones = None
                twos = None
                threes = None
                for j in range(batch):
                    sample = thisdata[(i * batch) + j]
                    # bug fix: was `ones == None`, which compares a TENSOR
                    # to None with `==` after the first iteration
                    if ones is None:
                        ones = sample[0][0].unsqueeze_(0).unsqueeze_(0)
                        twos = sample[0][1].unsqueeze_(0).unsqueeze_(0)
                        threes = sample[1].unsqueeze_(0)
                    else:
                        ones = torch.cat((ones, sample[0][0].unsqueeze_(0).unsqueeze_(0)), dim=0)
                        twos = torch.cat((twos, sample[0][1].unsqueeze_(0).unsqueeze_(0)), dim=0)
                        threes = torch.cat((threes, sample[1].unsqueeze_(0)), dim=0)
                thisdata1.append([[ones, twos], threes])
            data.append(thisdata1)
    print('time to load data: ', time.monotonic() - ftime, 'seconds')
    return [data, dl]
# from scipy.stats import rankdata
class CRISPRDataset(torch.utils.data.Dataset):
    """Dataset over pre-encoded [[guide, target], cfd] items, yielding
    dicts of tensors with a single leading channel dimension."""

    def __init__(self, thisdata):
        self.thisdata = thisdata

    def __len__(self):
        return len(self.thisdata)

    def __getitem__(self, idx):
        pair, cfd = self.thisdata[idx][0], self.thisdata[idx][1]
        guide, target = pair[0], pair[1]
        # squeeze away stray singleton dims, then add one leading dim:
        # sequences become (1, 23, 4); the cfd scalar becomes (1,)
        return {
            'target': torch.squeeze(target).unsqueeze_(dim=0),
            'guide': torch.squeeze(guide).unsqueeze_(dim=0),
            'cfd': torch.squeeze(cfd).unsqueeze_(dim=0)
        }
def collate_fn(batch):
    """Stack per-sample dicts into batched tensors, silently dropping
    malformed zero-dimensional samples (with debug prints) rather than
    letting torch.stack crash the loader."""
    keep = {key: [] for key in batch[0].keys()}
    for sample in batch:
        well_formed = (sum(list(sample['cfd'].shape)) > 0
                       and sum(list(sample['target'].shape)) > 0
                       and sum(list(sample['guide'].shape)) > 0)
        if well_formed:
            for key in sample.keys():
                keep[key].append(sample[key])
        else:
            # debug output for samples that lost their dims upstream
            print('1', sum(list(sample['cfd'].shape)), sample['cfd'])
            print('2', sum(list(sample['target'].shape)), len(sample['target'].shape), sample['target'].tolist())
            print('3', sum(list(sample['guide'].shape)), len(sample['guide'].shape))
    output = {}
    for key in keep.keys():
        if len(keep[key]) > 0:
            output[key] = torch.stack(keep[key])
        else:
            output[key] = torch.tensor([])
    return output
import pandas as pd
def rankDataLoader(file="crisprsql.csv", batch=64, mode="target"):
    """Like dataLoader, but labels each row with its global RANK of
    cleavage_freq (index in the sorted list, normalised to [0, 1))
    instead of the raw frequency.

    Returns [data, dl]: dl holds three DataLoaders over CRISPRDataset
    (train shuffled, val/test not); data holds manually pre-batched
    [[guide, target], rank] tensor groups for val/test only.
    """
    ftime = time.monotonic()
    with open(file) as f:
        d = list(csv.DictReader(f))
    if mode == "study":
        loadData = byStudy(d)
    elif mode == "guide":
        loadData = byGuide(d)
    else:
        loadData = byTarget(d)
    data = list()
    dl = list()
    train = True
    # global sorted list of valid frequencies; a row's rank is its index here
    ranks = list()
    for line in d:
        if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
            ranks.append(float(line['cleavage_freq']))
    ranks.sort()
    for t in range(3):
        df = pd.DataFrame(loadData[t])
        # df.drop(df.columns.difference(['cleavage_freq']), 1, inplace=True)
        # pd.to_numeric(df['cleavage_freq']
        # NOTE(review): result of to_numeric is discarded — presumably meant
        # to be assigned back to the column; confirm intent
        pd.to_numeric(df.cleavage_freq, errors='coerce')
        # cleave = df.cleavage_freq
        # df_ = pd.DataFrame(loadData[t]).drop(['cleavage_freq'], 1, inplace=True)
        # df_.join(cleave)
        df.dropna(subset=['cleavage_freq'], inplace=True)
        print(df.head())
        average_value = list()
        thisdata = list()
        for line in df.to_dict("records"):
            if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
                # label = normalised rank of this frequency; ranks.index is
                # O(n) per row, so this loop is O(n^2) overall
                thisdata.append([
                    [one_hot(line['grna_target_sequence'], line['grna_target_strand']),
                    one_hot(line['target_sequence'], line["target_strand"])],
                    torch.tensor(ranks.index(float(line['cleavage_freq'])) / len(ranks))])
                average_value.append(float(line['cleavage_freq']))
        # if line
        # mode = 0
        # zero = 0
        # for p in average_value:
        #     if p == statistics.mode(average_value):
        #         mode+=1
        #     if p <0:
        #         zero+=1
        # print(f"average CFD of {len(average_value)} datapoints in set {t + 1}: {sum(average_value)/len(average_value)}.\nMedian: {statistics.median(average_value)}.\nMode: {statistics.mode(average_value)} with {mode} datapoint.\nstandard deviation: {statistics.pstdev(average_value)}.\nlowest value: {min(average_value)}.\nHighest value: {max(average_value)}\n{zero} datapoints below zero\n\n")
        if train == True:
            # dl.append(torch.utils.data.DataLoader(thisdata, batch, True, num_workers=(1 if torch.cuda.is_available() else 0)))
            # only the first (train) split is shuffled
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, True, collate_fn=collate_fn, num_workers=(1 if torch.cuda.is_available() else 0)))
            # print(thisdata[0][0][0])
            train = False
        else:
            # dl.append(torch.utils.data.DataLoader(thisdata, batch, False, num_workers=(1 if torch.cuda.is_available() else 0)))
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, False, collate_fn=collate_fn, num_workers=(1 if torch.cuda.is_available() else 0)))
            # import pdb; pdb.set_trace()
            # manual pre-batching of val/test; trailing partial batch dropped
            thisdata1 = list()
            for i in range(int(len(thisdata)/batch)):
                ones = None
                twos = None
                threes = None
                for j in range(batch):
                    # NOTE(review): `ones == None` compares a tensor to None
                    # with == after the first pass; `is None` would be safer
                    if ones == None:
                        ones = thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)
                        twos = thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)
                        threes = thisdata[(i * batch) + j][1].unsqueeze_(0)
                    else:
                        ones = torch.cat((ones, thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)), dim=0)
                        twos = torch.cat((twos, thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)), dim=0)
                        threes = torch.cat((threes, thisdata[(i * batch) + j][1].unsqueeze_(0)), dim=0)
                thisdata1.append([[ones, twos], threes])
            data.append(thisdata1)
    print('time to load data: ', time.monotonic() - ftime, 'seconds')
    return [data, dl]
def fullDataLoader(file="augmentcrisprsql.csv", batch=64, mode="target", target='rank'):
    """Load the augmented CSV and build loaders with a selectable label.

    target selects the label column: 'regular' -> raw cleavage_freq,
    'rank' -> ranked_cleavage_freq, anything else -> a 2-class one-hot
    from threshhold_cleavage_freq. Returns [data, dl] as in dataLoader.
    """
    ftime = time.monotonic()
    with open(file) as f:
        d = list(csv.DictReader(f))
    random.shuffle(d)
    if mode == "study":
        loadData = byStudy(d)
    elif mode == "guide":
        loadData = byGuide(d)
    else:
        loadData = byTarget(d)
    data = list()
    dl = list()
    train = True
    for t in range(3):
        average_value = list()
        thisdata = list()
        # q counts rows dropped because an encoding came back zero-dim
        q = 0
        for line in loadData[t]:
            if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:
                if target == 'regular':
                    label = float(line['cleavage_freq'])
                elif target == 'rank':
                    label = [float(line['ranked_cleavage_freq'])]
                else:
                    label = [0, 1] if float(line['threshhold_cleavage_freq']) == 0 else [1, 0]
                # guard against degenerate zero-shaped tensors before appending
                if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(one_hot(line['grna_target_sequence'], line['grna_target_strand']).shape)) > 0 and sum(list(one_hot(line['target_sequence'], line["target_strand"]).shape)) > 0:
                    thisdata.append([
                        [one_hot(line['grna_target_sequence'], line['grna_target_strand']),
                        one_hot(line['target_sequence'], line["target_strand"])],
                        torch.tensor(label)])
                    average_value.append(label)
                    # print(sum(list(torch.tensor([label]).shape)), sum(list(one_hot(line['grna_target_sequence'], line['grna_target_strand']).shape)), sum(list(one_hot(line['target_sequence'], line["target_strand"]).shape)))
                else:
                    q+=1
                    print(sum(list(torch.tensor([label]).shape)), sum(list(one_hot(line['grna_target_sequence'], line['grna_target_strand']).shape)), sum(list(one_hot(line['target_sequence'], line["target_strand"]).shape)))
            # print(torch.tensor([label), len(torch.tensor([label]).shape))
        print(q)
        # if line
        # mode = 0
        # zero = 0
        # for p in average_value:
        #     if p == statistics.mode(average_value):
        #         mode+=1
        #     if p <0:
        #         zero+=1
        # print(f"average CFD of {len(average_value)} datapoints in set {t + 1}: {sum(average_value)/len(average_value)}.\nMedian: {statistics.median(average_value)}.\nMode: {statistics.mode(average_value)} with {mode} datapoint.\nstandard deviation: {statistics.pstdev(average_value)}.\nlowest value: {min(average_value)}.\nHighest value: {max(average_value)}\n{zero} datapoints below zero\n\n")
        if train == True:
            # dl.append(torch.utils.data.DataLoader(thisdata, batch, True, num_workers=(1 if torch.cuda.is_available() else 0)))
            # only the first (train) split is shuffled
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, True, collate_fn=collate_fn, num_workers=4))
            # print(thisdata[0][0][0])
            train = False
        else:
            # dl.append(torch.utils.data.DataLoader(thisdata, batch, False, num_workers=(1 if torch.cuda.is_available() else 0)))
            dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, False, collate_fn=collate_fn, num_workers=4))
            # import pdb; pdb.set_trace()
            # manual pre-batching of val/test; trailing partial batch dropped
            thisdata1 = list()
            for i in range(int(len(thisdata)/batch)):
                ones = None
                twos = None
                threes = None
                for j in range(batch):
                    # NOTE(review): `ones == None` compares a tensor to None
                    # with == after the first pass; `is None` would be safer
                    if ones == None:
                        ones = thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)
                        twos = thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)
                        threes = thisdata[(i * batch) + j][1].unsqueeze_(0)
                    else:
                        ones = torch.cat((ones, thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)), dim=0)
                        twos = torch.cat((twos, thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)), dim=0)
                        threes = torch.cat((threes, thisdata[(i * batch) + j][1].unsqueeze_(0)), dim=0)
                thisdata1.append([[ones, twos], threes])
            data.append(thisdata1)
    print('time to load data: ', time.monotonic() - ftime, 'seconds')
    return [data, dl]
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import f1_score
from sklearn.metrics import auc
def roc(labels, outputs):
    """Binarise labels/outputs at a fixed cutoff and report per-cutoff
    [ROC AUC, PR AUC, thresholded accuracy].

    NOTE(review): values are binarised at .01/i but the result dict is
    keyed by .1/i — looks inconsistent; confirm which cutoff is intended.
    Callers in this file only iterate the keys, so the mismatch is benign
    for them.
    """
    flat_labels = labels.flatten().tolist()
    flat_outputs = outputs.flatten().tolist()
    average_values = dict()
    for i in range(1, 2):  # single pass today; loop kept for cutoff sweeps
        cutoff = .01 / i
        binary_labels = [(0 if v <= cutoff else 1) for v in flat_labels]
        binary_outputs = [(0 if v <= cutoff else 1) for v in flat_outputs]
        hits = sum(1 for a, b in zip(binary_labels, binary_outputs) if a == b)
        lr_precision, lr_recall, _ = precision_recall_curve(binary_labels, binary_outputs)
        average_values[.1 / i] = [roc_auc_score(binary_labels, binary_outputs),
                                  auc(lr_recall, lr_precision),
                                  hits / len(binary_labels)]
    return average_values
def accuracy(labels, outputs, percent=.10):
    """Fraction of outputs lying within +/-percent (relative) of their label."""
    flat_labels = labels.flatten().tolist()
    flat_outputs = outputs.flatten().tolist()
    correct = 0
    total = 0
    for idx in range(len(flat_labels)):
        lower = flat_labels[idx] * (1 - percent)
        upper = flat_labels[idx] * (1 + percent)
        if lower <= flat_outputs[idx] <= upper:
            correct += 1
        total += 1
    return correct / total
def percentError(outputs, labels):
    """Mean relative error of *outputs* w.r.t. *labels* (labels of 0 yield inf/nan)."""
    relative = torch.abs(labels - outputs) / labels
    return torch.mean(relative)
def Test(net, dataset, device, crit, logpath=None):
    """Evaluate *net* over *dataset* without gradients, print (and
    optionally log to *logpath*) accuracy and mean loss, and return the
    accuracy as a percentage."""
    net.eval()
    correct = 0
    total = 0
    batches = 0
    loss = 0
    with torch.no_grad():
        for _, sample in enumerate(dataset, 0):
            inputs, labels = sample[0], sample[1].to(device)
            outputs = net(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            batches += 1
            correct += (predicted == labels).sum().item()
            loss += crit(outputs, labels)
    if logpath is not None:
        f = open(logpath, 'w')
        f.write('Accuracy of the network on the 10000 test images: %d %%' % (
            100 * correct / total))
        f.write(f"total: {total} correct: {correct}")
        f.write(f'loss: {loss/batches}')
        f.close()
    print('Accuracy of the network on the 10000 test images: %d %%' % (
        100 * correct / total))
    print(f"total: {total} correct: {correct}")
    print(f'loss: {loss/batches}')
    return 100 * correct / total
def getAllStudy():
    """Print, per study, the set of distinct guide sequences it contributes.

    Reads crisprsql.csv from the current directory; side effect only
    (prints study name, its guide set, and the set's size).
    """
    with open("crisprsql.csv") as f:
        data = csv.DictReader(f)
        alls = dict()
        for row in data:
            # skip degenerate single-nucleotide "sequences"
            if row['grna_target_sequence'] not in ["C", 'G', 'A', "T"]:
                try:
                    alls[row['study_name']].add(row['grna_target_sequence'])
                except KeyError:
                    # bug fix: set(seq) built a set of CHARACTERS; we want a
                    # one-element set containing the whole sequence, matching
                    # the .add() path above
                    alls[row["study_name"]] = {row['grna_target_sequence']}
    for r in alls:
        print(r)
        print(alls[r])
        print(len(alls[r]))
def getallGuide():
    """Print, per guide sequence, the set of distinct target sequences
    observed for it.

    Reads crisprsql.csv from the current directory; side effect only
    (prints guide, its target set, and the set's size).
    """
    with open("crisprsql.csv") as f:
        data = csv.DictReader(f)
        alls = dict()
        for row in data:
            # skip degenerate single-nucleotide "sequences"
            if row['grna_target_sequence'] not in ["C", 'G', 'A', "T"]:
                try:
                    alls[row['grna_target_sequence']].add(row['target_sequence'])
                except KeyError:
                    # bug fix: set(seq) built a set of CHARACTERS; we want a
                    # one-element set containing the whole target sequence
                    alls[row["grna_target_sequence"]] = {row['target_sequence']}
    for r in alls:
        print(r)
        print(alls[r])
        print(len(alls[r]))
def aboveandbelow(threshold):
    """Print the fraction of rows whose cleavage_freq lies above/below
    *threshold*.

    Reads crisprsql.csv from the current directory; rows with a blank
    cleavage_freq or a single-nucleotide guide are ignored.
    """
    with open("crisprsql.csv") as f:
        data = csv.DictReader(f)
        above = 0
        total = 0
        for row in data:
            if row['grna_target_sequence'] not in ["C", 'G', 'A', "T"] and row['cleavage_freq'] != '':
                if float(row['cleavage_freq']) > threshold:
                    above += 1
                total += 1
    # robustness fix: avoid ZeroDivisionError when no row qualifies
    if total == 0:
        print('No rows with a usable cleavage_freq found')
        return
    print(f'Above: {above / total}%. Below: {(total - above) / total}')
def NewTrain(epochs, optim, crit, batch_per, train_data, val_data, net, device, optim_time=None, logpath=None):
    """Train *net* through a schedule of optimizers, logging to wandb.

    Args:
        epochs: unused — the epoch counts come from *optim_time* instead.
        optim: initial optimizer (immediately overwritten by optim_time).
        crit: loss criterion (callable(outputs, labels) -> scalar tensor).
        batch_per: emit mid-epoch stats every batch_per training batches.
        train_data / val_data: iterables of (inputs, labels) batches;
            labels are moved to *device*, inputs are passed through as-is.
        net: the model; moved to *device* in place.
        device: torch device for the model and labels.
        optim_time: list of (num_epochs, optimizer) pairs, run in order.
        logpath: optional path for a plain-text training log.

    Side effects: wandb logging, checkpoints (.net.pth every epoch and
    on e%10==9, plus best-metric variants), prints. Returns None.
    """
    net.to(device)
    #def optim, loss, and init graph data
    criterion = crit
    optimizer = optim
    # get all labels for ROC
    # NOTE(review): the `x == None` tests below work only because these
    # accumulators start as plain None; `is None` would be safer with tensors.
    full_full_labels = None
    for i, data in enumerate(train_data, 0):
        if full_full_labels == None:
            full_full_labels = data[1].to(device)
        else:
            full_full_labels = torch.cat((full_full_labels, data[1].to(device)), 0)
    # collected but currently unused (the val ROC below is commented out)
    full_val_labels = None
    for i, data in enumerate(val_data, 0):
        if full_val_labels == None:
            full_val_labels = data[1].to(device)
        else:
            full_val_labels = torch.cat((full_val_labels, data[1].to(device)), 0)
    print("begin training")
    if logpath!= None:
        f = open(logpath, 'w')
    #these go down, and random loss is ~2.303 so 15 will be replaced
    # `best` is initialised but never updated below; `bestval`/`bestepoch`
    # track the best validation loss seen so far
    best = 15
    bestval = 15
    bestepoch = 0
    e = 0
    # begin training loop, larget loop is for lr scedule
    times = list()
    # bestnet = LogisticRegression()
    # bestnet.load_state_dict(copy.deepcopy(net.state_dict()))
    for q in optim_time:
        # each schedule entry is (epoch_count, optimizer)
        optimizer = q[1]
        print(q[0])
        # net.load_state_dict(copy.deepcopy(bestnet.state_dict())
        # print(
        #     'params', [p for p in net.parameters()],
        #     '\ngrads', [p.grad for p in net.parameters()]
        # )
        # epoch loop
        for epoch in range(q[0]): # loop over the dataset multiple times
            ftime = time.monotonic()
            random.shuffle(train_data)
            correct = 0
            total = 0
            running_loss = 0.0
            # train mode
            net.train()
            full_output = None
            full_labels = None
            full_full_output = None
            for i, data in enumerate(train_data, 0):
                # train step
                inputs, labels = data[0], data[1].to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward + backward + optimize
                # t = time.monotonic()
                outputs = net(inputs)
                # print(time.monotonic - t, " seconds for 512 outputs")
                loss = criterion(outputs, labels)
                loss.backward()
                # import pdb; pdb.set_trace()
                # things to look at:
                # - loss
                # - parameters
                # - inputs
                # - grads
                # if e % 300 == 299:
                #     print(
                #         'loss', loss,
                #         # '\ninputs', inputs,
                #         '\nlabels', labels,
                #         '\noutputs', outputs
                #     )
                optimizer.step()
                _, predicted = torch.max(outputs.data, 1)
                total+= labels.size(0)
                correct += (predicted == labels).sum().item()
                # print()
                running_loss += loss.item()
                # accumulate this window's outputs/labels for the ROC call
                if full_output == None:
                    full_output = outputs
                else:
                    full_output = torch.cat((full_output, outputs), 0)
                if full_labels == None:
                    full_labels = labels
                else:
                    full_labels = torch.cat((full_labels, labels), 0)
                # w = {f'output {i}': outputs.flatten()[i] for i in range(outputs.flatten().size(0))}
                # w.update({
                #     f'label {i}': labels.flatten()[i] for i in range(labels.flatten().size(0))
                # })
                w = ({'loss': loss.item(),
                    'accuracy': accuracy(labels, outputs),
                    'percent error': percentError(outputs, labels)})
                wandb.log(
                    # {
                    #     'loss': loss.item(),
                    #     # 'params': [p for p in net.parameters()],
                    #     # 'grads': [p.grad for p in net.parameters()],
                    #     # 'inputs': inputs,
                    #     f'label {i}': labels.flatten()[i] for i in len(labels.flatten().size(0)),
                    #     f'output {i}': outputs.flatten()[i] for i in len(outputs.flatten().size(0)),
                    #     'accuracy': accuracy(labels, outputs)
                    # }
                    w
                )
                # print statistics
                if i % batch_per == batch_per - 1: # print every 2000 mini-batches
                    print('[%d, %5d] loss: %.3f' %
                        (e + 1, i + 1, running_loss / batch_per))
                    # best = min(best, running_loss / batch_per)
                    # print('Accuracy of the network on the ' + str(batch_per) + 'th update: %d %%' % (
                    #     100 * correct / total))
                    wl = roc(full_labels, full_output)
                    wandlog = {}
                    # NOTE(review): this `q` shadows the outer optim_time
                    # entry; harmless today because `q[0]`/`q[1]` are only
                    # read at the top of the outer loop, but fragile.
                    for q in wl:
                        wandlog[f"midepoch ROC_AUC"] = wl[q][0]
                        wandlog[f"midepoch PR_AUC"] = wl[q][1]
                        wandlog[f"midepoch threshhold accuracy"] = wl[q][2]
                    # wandlog.update({
                    #     "LOSS": running_loss / batch_per,
                    #     "TYPE": "TRAIN",
                    #     'EPOCH': e+1,
                    #     'UPDATE': (e*len(train_data)) + i + 1})
                    w.update({'midepoch loss': loss.item(),
                        'midepoch accuracy': accuracy(labels, outputs),
                        'midepoch percent error': percentError(outputs, labels)})
                    wandb.log(
                        # {
                        #     'loss': loss.item(),
                        #     # 'params': [p for p in net.parameters()],
                        #     # 'grads': [p.grad for p in net.parameters()],
                        #     # 'inputs': inputs,
                        #     f'label {i}': labels.flatten()[i] for i in len(labels.flatten().size(0)),
                        #     f'output {i}': outputs.flatten()[i] for i in len(outputs.flatten().size(0)),
                        #     'accuracy': accuracy(labels, outputs)
                        # }
                        w
                    )
                    wandb.log(wandlog)
                    # fold the finished window into the whole-epoch buffer
                    if full_full_output == None:
                        full_full_output = full_output
                    else:
                        full_full_output = torch.cat((full_full_output, full_output), 0)
                    full_output = None
                    full_labels = None
                    running_loss = 0
                    correct = 0
                    total = 0
            # print('[%d] loss: %.20f' %
            #     (epoch + 1, running_loss / total))
            # if logpath != None:
            #     f.write('[%d] loss: %.20f' %
            #         (epoch + 1, running_loss / total))
            # fold the last (possibly partial) window into the epoch buffer
            if full_full_output == None:
                full_full_output = full_output
            else:
                full_full_output = torch.cat((full_full_output, full_output), 0)
            # ROC is commented out when training on 10 samples
            wl = roc(full_full_labels, full_full_output)
            w = {}
            for q in wl:
                w[f"epoch ROC_AUC"] = wl[q][0]
                w[f"epoch PR_AUC"] = wl[q][1]
                w[f"epoch threshhold accuracy"] = wl[q][2]
            # wandlog.update({
            #     "LOSS": running_loss / batch_per,
            #     "TYPE": "TRAIN",
            #     'EPOCH': e+1,
            #     'UPDATE': (e + 1) *len(train_data)})
            w.update({'epoch loss': loss.item(),
                'epoch accuracy': accuracy(full_full_labels, full_full_output),
                'epoch percent error': percentError(full_full_output, full_full_labels),
                'label': labels.flatten()[0],
                'output': outputs.flatten()[0]})
            wandb.log(
                # {
                #     'loss': loss.item(),
                #     # 'params': [p for p in net.parameters()],
                #     # 'grads': [p.grad for p in net.parameters()],
                #     # 'inputs': inputs,
                #     f'label {i}': labels.flatten()[i] for i in len(labels.flatten().size(0)),
                #     f'output {i}': outputs.flatten()[i] for i in len(outputs.flatten().size(0)),
                #     'accuracy': accuracy(labels, outputs)
                # }
                w
            )
            # checkpoint whenever an epoch metric saturates at 1
            if w['epoch accuracy'] == 1:
                PATH = f'.accuracynet.pth'
                torch.save(net.state_dict(), PATH)
            if w['epoch PR_AUC'] == 1:
                PATH = f'.PRnet.pth'
                torch.save(net.state_dict(), PATH)
            if w['epoch ROC_AUC'] == 1:
                PATH = f'.ROCnet.pth'
                torch.save(net.state_dict(), PATH)
            # wandb.log(wandlog)
            full_output = None
            full_full_output = None
            running_loss = 0
            correct = 0
            total = 0
            running_loss = 0
            net.eval()
            correct = 0
            total = 0
            if e % 10 == 9:
                PATH = f'.net.pth'
                torch.save(net.state_dict(), PATH)
            #check val set
            for i, data in enumerate(val_data, 0):
                inputs, labels = data[0], data[1].to(device)
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                # NOTE(review): backward() inside the validation loop (with
                # no torch.no_grad and no optimizer.step) accumulates grads
                # during eval — presumably unintended; confirm.
                loss.backward()
                running_loss += loss.item()
                total+= labels.size(0)
                if full_output == None:
                    full_output = outputs
                else:
                    full_output = torch.cat((full_output, outputs), 0)
            # if e % 300 == 299:
            print(f'Validation loss for Epoch [{e +1}]: {running_loss/total}')
            # if logpath != None:
            #     f.write(f'Validation loss for Epoch [{epoch}]: {running_loss/total}')
            # wl = roc(full_val_labels, full_output)
            wandlog = {}
            # for q in wl:
            #     wandlog[f"{q} ROC_AUC"] = wl[q][0]
            #     wandlog[f"{q} PR_AUC"] = wl[q][1]
            #     wandlog[f"{q} ACCURACY"] = wl[q][2]
            # wandlog.update({
            #     "LOSS": running_loss / len(val_data),
            #     "TYPE": "VAL",
            #     'EPOCH': e+1,
            #     'UPDATE': (e + 1)*len(train_data)})
            # wandb.log(wandlog)
            # best = min(best, running_loss / total)
            # early stop just goes to the next lr change checkpoint
            if bestval <= running_loss / total:
                # if epoch >= 5:
                # print('Early Stop')
                # print(f"Best Validation loss: {bestval}")
                # print(f"Current Validation loss: {running_loss / total}")
                # no-op: early stopping is currently disabled
                e = e
                # break
                # continue
                # return
            else:
                # bestnet.load_state_dict(copy.deepcopy(net.state_dict()))
                bestepoch = e
                bestval = running_loss / total
            running_loss = 0
            correct = 0
            total = 0
            times.append(time.monotonic() - ftime)
            PATH = f'.net.pth'
            torch.save(net.state_dict(), PATH)
            # if e % 300 == 299:
            print('time for epoch: ', times[-1], 'seconds')
            if logpath != None:
                f.write(f'time for epoch: {times[-1]}, seconds')
            e+=1
    # finish training. in future dont plot and save here just return them
    print('Finished Training')
    print('average time per epoch: ', sum(times)/len(times), 'seconds')
    if logpath != None:
        f.write('Finished Training')
        f.write(f'average time per epoch: {sum(times)/len(times)} seconds')
        f.close()
    return
# def compute_dataframe(df: pd.DataFrame, checkpoint_path: str):
# model = LogisticRegression().load_state_dict(torch.load(checkpoint_path, map_location=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")))
# targets, targets_s, guides, guides_s = df.target_sequence.tolist(), df.target_strand.tolist(), df.grna_target_sequence.tolist(), df.grna_target_strand.tolist()
# preds = []
# for guide, target, guide_s, target_s in zip(guides, targets, guides_s, targets_s):
# pred = model([one_hot(guide, guide_s), one_hot(target, target_s)])
# preds.append(pred.item())
# df['pred'] = preds
# return df
def compute_dataframe(df: pd.DataFrame, checkpoint_path):
    """Score every guide/target pair in *df* and attach the predictions.

    Parameters
    ----------
    df : pd.DataFrame
        Must contain the columns ``target_sequence``, ``target_strand``,
        ``grna_target_sequence`` and ``grna_target_strand``.
    checkpoint_path : callable
        NOTE(review): despite the name, this parameter receives the
        already-loaded model, not a path — the checkpoint-loading variant
        is commented out above. TODO: rename (with a deprecation shim) or
        restore loading. The model is called with
        ``[one_hot(guide, strand), one_hot(target, strand)]`` per row.

    Returns
    -------
    pd.DataFrame
        The same frame, mutated in place, with a new ``pred`` column of
        per-row float scores.
    """
    model = checkpoint_path  # alias kept for clarity: the "path" is the model
    targets = df.target_sequence.tolist()
    targets_s = df.target_strand.tolist()
    guides = df.grna_target_sequence.tolist()
    guides_s = df.grna_target_strand.tolist()
    preds = []
    # Pure inference: no_grad avoids building an autograd graph for each
    # of the per-row forward passes (big memory win on large frames).
    with torch.no_grad():
        for guide, target, guide_s, target_s in zip(guides, targets, guides_s, targets_s):
            pred = model([one_hot(guide, guide_s), one_hot(target, target_s)])
            preds.append(pred.item())
    df['pred'] = preds
    return df
|
flexible
|
{
"blob_id": "a0059563b2eed4ca185a8e0971e8e0c80f5fb8f8",
"index": 6668,
"step-1": "<mask token>\n\n\ndef byGuide(data, val=None, test=None):\n val_guides = val\n if val == None:\n val_guides = ['GGGTGGGGGGAGTTTGCTCCTGG', 'GACCCCCTCCACCCCGCCTCCGG',\n 'GGCCTCCCCAAAGCCTGGCCAGG', 'GAACACAAAGCATAGACTGCGGG']\n test_guides = test\n if test == None:\n test_guides = ['GCAAAACTCAACCCTACCCCAGG', 'GGCCCAGACTGAGCACGTGATGG',\n 'GGGAAAGACCCAGCATCCGTGGG', 'GGAATCCCTTCTGCAGCACCTGG',\n 'GTGAGTGAGTGTGTGCGTGTGGG', 'GATGATGATGCCCCGGGCGTTGG',\n 'GCCGGAGGGGTTTGCACAGAAGG']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['grna_target_sequence'] in val_guides:\n val_set.append(pair)\n elif pair['grna_target_sequence'] in test_guides:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef byTarget(data, train=0.7, val=0.1, test=0.2):\n random.shuffle(data)\n train_set = []\n val_set = []\n test_set = []\n for i in range(len(data)):\n if i <= len(data) * train:\n train_set.append(data[i])\n elif i <= len(data) * (train + val):\n val_set.append(data[i])\n else:\n test_set.append(data[i])\n return [train_set, val_set, test_set]\n\n\ndef byStudy(data, val=None, test=None):\n val_studies = val\n if val == None:\n val_studies = ['Anderson', 'Ran']\n test_studies = test\n if test == None:\n test_studies = ['Kim', 'Tsai', 'Cho']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['study_name'] in val_studies:\n val_set.append(pair)\n elif pair['study_name'] in test_studies:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef one_hot(data, sign='+'):\n sins = None\n sequence = None\n data = data.lower()\n for n in data:\n one_hot = torch.zeros((1, 4))\n if n == 'a':\n one_hot[0][0] = 1\n elif n == 'c':\n one_hot[0][1] = 1\n elif n == 'g':\n one_hot[0][2] = 1\n elif n == 't':\n one_hot[0][3] = 1\n if sins == None:\n sequence = 
copy.deepcopy(one_hot)\n sins = 1\n else:\n sequence = torch.cat((sequence, one_hot), dim=0)\n if list(sequence.size())[0] < 23:\n for i in range(23 - list(sequence.size())[0]):\n sequence = torch.cat((sequence, torch.zeros((1, 4))), dim=0)\n if list(sequence.size())[0] > 23:\n sequence = sequence[:23]\n if sign == '-':\n sequence = torch.flip(sequence, [1])\n return sequence\n\n\n<mask token>\n\n\nclass CRISPRDataset(torch.utils.data.Dataset):\n\n def __init__(self, thisdata):\n self.thisdata = thisdata\n\n def __len__(self):\n return len(self.thisdata)\n\n def __getitem__(self, idx):\n item = self.thisdata[idx]\n sample = {'target': torch.squeeze(item[0][1]).unsqueeze_(dim=0),\n 'guide': torch.squeeze(item[0][0]).unsqueeze_(dim=0), 'cfd':\n torch.squeeze(item[1]).unsqueeze_(dim=0)}\n return sample\n\n\ndef collate_fn(batch):\n output = {}\n b = {key: [] for key in batch[0].keys()}\n for i in batch:\n if sum(list(i['cfd'].shape)) > 0 and sum(list(i['target'].shape)\n ) > 0 and sum(list(i['guide'].shape)) > 0:\n for key in i.keys():\n b[key].append(i[key])\n else:\n print('1', sum(list(i['cfd'].shape)), i['cfd'])\n print('2', sum(list(i['target'].shape)), len(i['target'].shape),\n i['target'].tolist())\n print('3', sum(list(i['guide'].shape)), len(i['guide'].shape))\n for key in b.keys():\n if len(b[key]) > 0:\n output[key] = torch.stack(b[key])\n else:\n output[key] = torch.tensor([])\n return output\n\n\n<mask token>\n\n\ndef rankDataLoader(file='crisprsql.csv', batch=64, mode='target'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n ranks = list()\n for line in d:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n ranks.append(float(line['cleavage_freq']))\n ranks.sort()\n for t in range(3):\n df = pd.DataFrame(loadData[t])\n 
pd.to_numeric(df.cleavage_freq, errors='coerce')\n df.dropna(subset=['cleavage_freq'], inplace=True)\n print(df.head())\n average_value = list()\n thisdata = list()\n for line in df.to_dict('records'):\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(ranks.index(float(line['cleavage_freq'])) / len(\n ranks))])\n average_value.append(float(line['cleavage_freq']))\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=1 if torch.\n cuda.is_available() else 0))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=1 if torch\n .cuda.is_available() else 0))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\ndef fullDataLoader(file='augmentcrisprsql.csv', batch=64, mode='target',\n target='rank'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n random.shuffle(d)\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n 
else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n for t in range(3):\n average_value = list()\n thisdata = list()\n q = 0\n for line in loadData[t]:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n if target == 'regular':\n label = float(line['cleavage_freq'])\n elif target == 'rank':\n label = [float(line['ranked_cleavage_freq'])]\n else:\n label = [0, 1] if float(line['threshhold_cleavage_freq']\n ) == 0 else [1, 0]\n if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)) > 0 and sum(list(one_hot\n (line['target_sequence'], line['target_strand']).shape)\n ) > 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(label)])\n average_value.append(label)\n else:\n q += 1\n print(sum(list(torch.tensor([label]).shape)), sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)), sum(list(one_hot(\n line['target_sequence'], line['target_strand']).shape))\n )\n print(q)\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=4))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=4))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n 
threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\n<mask token>\n\n\ndef roc(labels, outputs):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n average_values = dict()\n for i in range(1, 2):\n thislabel = list()\n thisoutput = list()\n pres = 0\n totalpres = 0\n for j in range(len(llabels)):\n if llabels[j] <= 0.01 / i:\n thislabel.append(0)\n else:\n thislabel.append(1)\n if loutputs[j] <= 0.01 / i:\n thisoutput.append(0)\n else:\n thisoutput.append(1)\n if thislabel[-1] == thisoutput[-1]:\n pres += 1\n totalpres += 1\n lr_precision, lr_recall, _ = precision_recall_curve(thislabel,\n thisoutput)\n average_values[0.1 / i] = [roc_auc_score(thislabel, thisoutput),\n auc(lr_recall, lr_precision), pres / totalpres]\n return average_values\n\n\ndef accuracy(labels, outputs, percent=0.1):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n correct = 0\n total = 0\n for i in range(len(llabels)):\n if llabels[i] * (1 - percent) <= loutputs[i] and llabels[i] * (1 +\n percent) >= loutputs[i]:\n correct += 1\n total += 1\n return correct / total\n\n\n<mask token>\n\n\ndef Test(net, dataset, device, crit, logpath=None):\n net.eval()\n correct = 0\n total = 0\n totalloss = 0\n loss = 0\n with torch.no_grad():\n for i, data in enumerate(dataset, 0):\n inputs, labels = data[0], data[1].to(device)\n outputs = net(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n totalloss += 1\n correct += (predicted == labels).sum().item()\n loss += crit(outputs, labels)\n if logpath != None:\n f = open(logpath, 'w')\n f.write('Accuracy of the network on the 10000 test images: %d %%' %\n (100 * correct / total))\n f.write(f'total: {total} correct: {correct}')\n f.write(f'loss: {loss / totalloss}')\n f.close()\n 
print('Accuracy of the network on the 10000 test images: %d %%' % (100 *\n correct / total))\n print(f'total: {total} correct: {correct}')\n print(f'loss: {loss / totalloss}')\n return 100 * correct / total\n\n\ndef getAllStudy():\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:\n try:\n alls[row['study_name']].add(row['grna_target_sequence'])\n except KeyError:\n alls[row['study_name']] = set(row['grna_target_sequence'])\n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef byGuide(data, val=None, test=None):\n val_guides = val\n if val == None:\n val_guides = ['GGGTGGGGGGAGTTTGCTCCTGG', 'GACCCCCTCCACCCCGCCTCCGG',\n 'GGCCTCCCCAAAGCCTGGCCAGG', 'GAACACAAAGCATAGACTGCGGG']\n test_guides = test\n if test == None:\n test_guides = ['GCAAAACTCAACCCTACCCCAGG', 'GGCCCAGACTGAGCACGTGATGG',\n 'GGGAAAGACCCAGCATCCGTGGG', 'GGAATCCCTTCTGCAGCACCTGG',\n 'GTGAGTGAGTGTGTGCGTGTGGG', 'GATGATGATGCCCCGGGCGTTGG',\n 'GCCGGAGGGGTTTGCACAGAAGG']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['grna_target_sequence'] in val_guides:\n val_set.append(pair)\n elif pair['grna_target_sequence'] in test_guides:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef byTarget(data, train=0.7, val=0.1, test=0.2):\n random.shuffle(data)\n train_set = []\n val_set = []\n test_set = []\n for i in range(len(data)):\n if i <= len(data) * train:\n train_set.append(data[i])\n elif i <= len(data) * (train + val):\n val_set.append(data[i])\n else:\n test_set.append(data[i])\n return [train_set, val_set, test_set]\n\n\ndef byStudy(data, val=None, test=None):\n val_studies = val\n if val == None:\n val_studies = ['Anderson', 'Ran']\n test_studies = test\n if test == None:\n test_studies = ['Kim', 'Tsai', 'Cho']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['study_name'] in val_studies:\n val_set.append(pair)\n elif pair['study_name'] in test_studies:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef one_hot(data, sign='+'):\n sins = None\n sequence = None\n data = data.lower()\n for n in data:\n one_hot = torch.zeros((1, 4))\n if n == 'a':\n one_hot[0][0] = 1\n elif n == 'c':\n one_hot[0][1] = 1\n elif n == 'g':\n one_hot[0][2] = 1\n elif n == 't':\n one_hot[0][3] = 1\n if sins == None:\n sequence = 
copy.deepcopy(one_hot)\n sins = 1\n else:\n sequence = torch.cat((sequence, one_hot), dim=0)\n if list(sequence.size())[0] < 23:\n for i in range(23 - list(sequence.size())[0]):\n sequence = torch.cat((sequence, torch.zeros((1, 4))), dim=0)\n if list(sequence.size())[0] > 23:\n sequence = sequence[:23]\n if sign == '-':\n sequence = torch.flip(sequence, [1])\n return sequence\n\n\n<mask token>\n\n\nclass CRISPRDataset(torch.utils.data.Dataset):\n\n def __init__(self, thisdata):\n self.thisdata = thisdata\n\n def __len__(self):\n return len(self.thisdata)\n\n def __getitem__(self, idx):\n item = self.thisdata[idx]\n sample = {'target': torch.squeeze(item[0][1]).unsqueeze_(dim=0),\n 'guide': torch.squeeze(item[0][0]).unsqueeze_(dim=0), 'cfd':\n torch.squeeze(item[1]).unsqueeze_(dim=0)}\n return sample\n\n\ndef collate_fn(batch):\n output = {}\n b = {key: [] for key in batch[0].keys()}\n for i in batch:\n if sum(list(i['cfd'].shape)) > 0 and sum(list(i['target'].shape)\n ) > 0 and sum(list(i['guide'].shape)) > 0:\n for key in i.keys():\n b[key].append(i[key])\n else:\n print('1', sum(list(i['cfd'].shape)), i['cfd'])\n print('2', sum(list(i['target'].shape)), len(i['target'].shape),\n i['target'].tolist())\n print('3', sum(list(i['guide'].shape)), len(i['guide'].shape))\n for key in b.keys():\n if len(b[key]) > 0:\n output[key] = torch.stack(b[key])\n else:\n output[key] = torch.tensor([])\n return output\n\n\n<mask token>\n\n\ndef rankDataLoader(file='crisprsql.csv', batch=64, mode='target'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n ranks = list()\n for line in d:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n ranks.append(float(line['cleavage_freq']))\n ranks.sort()\n for t in range(3):\n df = pd.DataFrame(loadData[t])\n 
pd.to_numeric(df.cleavage_freq, errors='coerce')\n df.dropna(subset=['cleavage_freq'], inplace=True)\n print(df.head())\n average_value = list()\n thisdata = list()\n for line in df.to_dict('records'):\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(ranks.index(float(line['cleavage_freq'])) / len(\n ranks))])\n average_value.append(float(line['cleavage_freq']))\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=1 if torch.\n cuda.is_available() else 0))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=1 if torch\n .cuda.is_available() else 0))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\ndef fullDataLoader(file='augmentcrisprsql.csv', batch=64, mode='target',\n target='rank'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n random.shuffle(d)\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n 
else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n for t in range(3):\n average_value = list()\n thisdata = list()\n q = 0\n for line in loadData[t]:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n if target == 'regular':\n label = float(line['cleavage_freq'])\n elif target == 'rank':\n label = [float(line['ranked_cleavage_freq'])]\n else:\n label = [0, 1] if float(line['threshhold_cleavage_freq']\n ) == 0 else [1, 0]\n if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)) > 0 and sum(list(one_hot\n (line['target_sequence'], line['target_strand']).shape)\n ) > 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(label)])\n average_value.append(label)\n else:\n q += 1\n print(sum(list(torch.tensor([label]).shape)), sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)), sum(list(one_hot(\n line['target_sequence'], line['target_strand']).shape))\n )\n print(q)\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=4))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=4))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n 
threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\n<mask token>\n\n\ndef roc(labels, outputs):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n average_values = dict()\n for i in range(1, 2):\n thislabel = list()\n thisoutput = list()\n pres = 0\n totalpres = 0\n for j in range(len(llabels)):\n if llabels[j] <= 0.01 / i:\n thislabel.append(0)\n else:\n thislabel.append(1)\n if loutputs[j] <= 0.01 / i:\n thisoutput.append(0)\n else:\n thisoutput.append(1)\n if thislabel[-1] == thisoutput[-1]:\n pres += 1\n totalpres += 1\n lr_precision, lr_recall, _ = precision_recall_curve(thislabel,\n thisoutput)\n average_values[0.1 / i] = [roc_auc_score(thislabel, thisoutput),\n auc(lr_recall, lr_precision), pres / totalpres]\n return average_values\n\n\ndef accuracy(labels, outputs, percent=0.1):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n correct = 0\n total = 0\n for i in range(len(llabels)):\n if llabels[i] * (1 - percent) <= loutputs[i] and llabels[i] * (1 +\n percent) >= loutputs[i]:\n correct += 1\n total += 1\n return correct / total\n\n\n<mask token>\n\n\ndef Test(net, dataset, device, crit, logpath=None):\n net.eval()\n correct = 0\n total = 0\n totalloss = 0\n loss = 0\n with torch.no_grad():\n for i, data in enumerate(dataset, 0):\n inputs, labels = data[0], data[1].to(device)\n outputs = net(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n totalloss += 1\n correct += (predicted == labels).sum().item()\n loss += crit(outputs, labels)\n if logpath != None:\n f = open(logpath, 'w')\n f.write('Accuracy of the network on the 10000 test images: %d %%' %\n (100 * correct / total))\n f.write(f'total: {total} correct: {correct}')\n f.write(f'loss: {loss / totalloss}')\n f.close()\n 
print('Accuracy of the network on the 10000 test images: %d %%' % (100 *\n correct / total))\n print(f'total: {total} correct: {correct}')\n print(f'loss: {loss / totalloss}')\n return 100 * correct / total\n\n\ndef getAllStudy():\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:\n try:\n alls[row['study_name']].add(row['grna_target_sequence'])\n except KeyError:\n alls[row['study_name']] = set(row['grna_target_sequence'])\n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n\n\n<mask token>\n\n\ndef aboveandbelow(threshold):\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n above = 0\n total = 0\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T'] and row[\n 'cleavage_freq'] != '':\n if float(row['cleavage_freq']) > threshold:\n above += 1\n total += 1\n print(f'Above: {above / total}%. Below: {(total - above) / total}')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef byGuide(data, val=None, test=None):\n val_guides = val\n if val == None:\n val_guides = ['GGGTGGGGGGAGTTTGCTCCTGG', 'GACCCCCTCCACCCCGCCTCCGG',\n 'GGCCTCCCCAAAGCCTGGCCAGG', 'GAACACAAAGCATAGACTGCGGG']\n test_guides = test\n if test == None:\n test_guides = ['GCAAAACTCAACCCTACCCCAGG', 'GGCCCAGACTGAGCACGTGATGG',\n 'GGGAAAGACCCAGCATCCGTGGG', 'GGAATCCCTTCTGCAGCACCTGG',\n 'GTGAGTGAGTGTGTGCGTGTGGG', 'GATGATGATGCCCCGGGCGTTGG',\n 'GCCGGAGGGGTTTGCACAGAAGG']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['grna_target_sequence'] in val_guides:\n val_set.append(pair)\n elif pair['grna_target_sequence'] in test_guides:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef byTarget(data, train=0.7, val=0.1, test=0.2):\n random.shuffle(data)\n train_set = []\n val_set = []\n test_set = []\n for i in range(len(data)):\n if i <= len(data) * train:\n train_set.append(data[i])\n elif i <= len(data) * (train + val):\n val_set.append(data[i])\n else:\n test_set.append(data[i])\n return [train_set, val_set, test_set]\n\n\ndef byStudy(data, val=None, test=None):\n val_studies = val\n if val == None:\n val_studies = ['Anderson', 'Ran']\n test_studies = test\n if test == None:\n test_studies = ['Kim', 'Tsai', 'Cho']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['study_name'] in val_studies:\n val_set.append(pair)\n elif pair['study_name'] in test_studies:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef one_hot(data, sign='+'):\n sins = None\n sequence = None\n data = data.lower()\n for n in data:\n one_hot = torch.zeros((1, 4))\n if n == 'a':\n one_hot[0][0] = 1\n elif n == 'c':\n one_hot[0][1] = 1\n elif n == 'g':\n one_hot[0][2] = 1\n elif n == 't':\n one_hot[0][3] = 1\n if sins == None:\n sequence = 
copy.deepcopy(one_hot)\n sins = 1\n else:\n sequence = torch.cat((sequence, one_hot), dim=0)\n if list(sequence.size())[0] < 23:\n for i in range(23 - list(sequence.size())[0]):\n sequence = torch.cat((sequence, torch.zeros((1, 4))), dim=0)\n if list(sequence.size())[0] > 23:\n sequence = sequence[:23]\n if sign == '-':\n sequence = torch.flip(sequence, [1])\n return sequence\n\n\n<mask token>\n\n\nclass CRISPRDataset(torch.utils.data.Dataset):\n\n def __init__(self, thisdata):\n self.thisdata = thisdata\n\n def __len__(self):\n return len(self.thisdata)\n\n def __getitem__(self, idx):\n item = self.thisdata[idx]\n sample = {'target': torch.squeeze(item[0][1]).unsqueeze_(dim=0),\n 'guide': torch.squeeze(item[0][0]).unsqueeze_(dim=0), 'cfd':\n torch.squeeze(item[1]).unsqueeze_(dim=0)}\n return sample\n\n\ndef collate_fn(batch):\n output = {}\n b = {key: [] for key in batch[0].keys()}\n for i in batch:\n if sum(list(i['cfd'].shape)) > 0 and sum(list(i['target'].shape)\n ) > 0 and sum(list(i['guide'].shape)) > 0:\n for key in i.keys():\n b[key].append(i[key])\n else:\n print('1', sum(list(i['cfd'].shape)), i['cfd'])\n print('2', sum(list(i['target'].shape)), len(i['target'].shape),\n i['target'].tolist())\n print('3', sum(list(i['guide'].shape)), len(i['guide'].shape))\n for key in b.keys():\n if len(b[key]) > 0:\n output[key] = torch.stack(b[key])\n else:\n output[key] = torch.tensor([])\n return output\n\n\n<mask token>\n\n\ndef rankDataLoader(file='crisprsql.csv', batch=64, mode='target'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n ranks = list()\n for line in d:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n ranks.append(float(line['cleavage_freq']))\n ranks.sort()\n for t in range(3):\n df = pd.DataFrame(loadData[t])\n 
pd.to_numeric(df.cleavage_freq, errors='coerce')\n df.dropna(subset=['cleavage_freq'], inplace=True)\n print(df.head())\n average_value = list()\n thisdata = list()\n for line in df.to_dict('records'):\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(ranks.index(float(line['cleavage_freq'])) / len(\n ranks))])\n average_value.append(float(line['cleavage_freq']))\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=1 if torch.\n cuda.is_available() else 0))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=1 if torch\n .cuda.is_available() else 0))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\ndef fullDataLoader(file='augmentcrisprsql.csv', batch=64, mode='target',\n target='rank'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n random.shuffle(d)\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n 
else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n for t in range(3):\n average_value = list()\n thisdata = list()\n q = 0\n for line in loadData[t]:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n if target == 'regular':\n label = float(line['cleavage_freq'])\n elif target == 'rank':\n label = [float(line['ranked_cleavage_freq'])]\n else:\n label = [0, 1] if float(line['threshhold_cleavage_freq']\n ) == 0 else [1, 0]\n if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)) > 0 and sum(list(one_hot\n (line['target_sequence'], line['target_strand']).shape)\n ) > 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(label)])\n average_value.append(label)\n else:\n q += 1\n print(sum(list(torch.tensor([label]).shape)), sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)), sum(list(one_hot(\n line['target_sequence'], line['target_strand']).shape))\n )\n print(q)\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=4))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=4))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n 
threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\n<mask token>\n\n\ndef roc(labels, outputs):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n average_values = dict()\n for i in range(1, 2):\n thislabel = list()\n thisoutput = list()\n pres = 0\n totalpres = 0\n for j in range(len(llabels)):\n if llabels[j] <= 0.01 / i:\n thislabel.append(0)\n else:\n thislabel.append(1)\n if loutputs[j] <= 0.01 / i:\n thisoutput.append(0)\n else:\n thisoutput.append(1)\n if thislabel[-1] == thisoutput[-1]:\n pres += 1\n totalpres += 1\n lr_precision, lr_recall, _ = precision_recall_curve(thislabel,\n thisoutput)\n average_values[0.1 / i] = [roc_auc_score(thislabel, thisoutput),\n auc(lr_recall, lr_precision), pres / totalpres]\n return average_values\n\n\ndef accuracy(labels, outputs, percent=0.1):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n correct = 0\n total = 0\n for i in range(len(llabels)):\n if llabels[i] * (1 - percent) <= loutputs[i] and llabels[i] * (1 +\n percent) >= loutputs[i]:\n correct += 1\n total += 1\n return correct / total\n\n\ndef percentError(outputs, labels):\n return torch.mean(torch.abs(labels - outputs) / labels)\n\n\ndef Test(net, dataset, device, crit, logpath=None):\n net.eval()\n correct = 0\n total = 0\n totalloss = 0\n loss = 0\n with torch.no_grad():\n for i, data in enumerate(dataset, 0):\n inputs, labels = data[0], data[1].to(device)\n outputs = net(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n totalloss += 1\n correct += (predicted == labels).sum().item()\n loss += crit(outputs, labels)\n if logpath != None:\n f = open(logpath, 'w')\n f.write('Accuracy of the network on the 10000 test images: %d %%' %\n (100 * correct / total))\n f.write(f'total: {total} 
correct: {correct}')\n f.write(f'loss: {loss / totalloss}')\n f.close()\n print('Accuracy of the network on the 10000 test images: %d %%' % (100 *\n correct / total))\n print(f'total: {total} correct: {correct}')\n print(f'loss: {loss / totalloss}')\n return 100 * correct / total\n\n\ndef getAllStudy():\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:\n try:\n alls[row['study_name']].add(row['grna_target_sequence'])\n except KeyError:\n alls[row['study_name']] = set(row['grna_target_sequence'])\n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n\n\ndef getallGuide():\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:\n try:\n alls[row['grna_target_sequence']].add(row[\n 'target_sequence'])\n except KeyError:\n alls[row['grna_target_sequence']] = set(row[\n 'target_sequence'])\n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n\n\ndef aboveandbelow(threshold):\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n above = 0\n total = 0\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T'] and row[\n 'cleavage_freq'] != '':\n if float(row['cleavage_freq']) > threshold:\n above += 1\n total += 1\n print(f'Above: {above / total}%. 
Below: {(total - above) / total}')\n\n\ndef NewTrain(epochs, optim, crit, batch_per, train_data, val_data, net,\n device, optim_time=None, logpath=None):\n net.to(device)\n criterion = crit\n optimizer = optim\n full_full_labels = None\n for i, data in enumerate(train_data, 0):\n if full_full_labels == None:\n full_full_labels = data[1].to(device)\n else:\n full_full_labels = torch.cat((full_full_labels, data[1].to(\n device)), 0)\n full_val_labels = None\n for i, data in enumerate(val_data, 0):\n if full_val_labels == None:\n full_val_labels = data[1].to(device)\n else:\n full_val_labels = torch.cat((full_val_labels, data[1].to(device\n )), 0)\n print('begin training')\n if logpath != None:\n f = open(logpath, 'w')\n best = 15\n bestval = 15\n bestepoch = 0\n e = 0\n times = list()\n for q in optim_time:\n optimizer = q[1]\n print(q[0])\n for epoch in range(q[0]):\n ftime = time.monotonic()\n random.shuffle(train_data)\n correct = 0\n total = 0\n running_loss = 0.0\n net.train()\n full_output = None\n full_labels = None\n full_full_output = None\n for i, data in enumerate(train_data, 0):\n inputs, labels = data[0], data[1].to(device)\n optimizer.zero_grad()\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n running_loss += loss.item()\n if full_output == None:\n full_output = outputs\n else:\n full_output = torch.cat((full_output, outputs), 0)\n if full_labels == None:\n full_labels = labels\n else:\n full_labels = torch.cat((full_labels, labels), 0)\n w = {'loss': loss.item(), 'accuracy': accuracy(labels,\n outputs), 'percent error': percentError(outputs, labels)}\n wandb.log(w)\n if i % batch_per == batch_per - 1:\n print('[%d, %5d] loss: %.3f' % (e + 1, i + 1, \n running_loss / batch_per))\n wl = roc(full_labels, full_output)\n wandlog = {}\n for q in wl:\n wandlog[f'midepoch ROC_AUC'] = wl[q][0]\n 
wandlog[f'midepoch PR_AUC'] = wl[q][1]\n wandlog[f'midepoch threshhold accuracy'] = wl[q][2]\n w.update({'midepoch loss': loss.item(),\n 'midepoch accuracy': accuracy(labels, outputs),\n 'midepoch percent error': percentError(outputs,\n labels)})\n wandb.log(w)\n wandb.log(wandlog)\n if full_full_output == None:\n full_full_output = full_output\n else:\n full_full_output = torch.cat((full_full_output,\n full_output), 0)\n full_output = None\n full_labels = None\n running_loss = 0\n correct = 0\n total = 0\n if full_full_output == None:\n full_full_output = full_output\n else:\n full_full_output = torch.cat((full_full_output, full_output), 0\n )\n wl = roc(full_full_labels, full_full_output)\n w = {}\n for q in wl:\n w[f'epoch ROC_AUC'] = wl[q][0]\n w[f'epoch PR_AUC'] = wl[q][1]\n w[f'epoch threshhold accuracy'] = wl[q][2]\n w.update({'epoch loss': loss.item(), 'epoch accuracy': accuracy\n (full_full_labels, full_full_output), 'epoch percent error':\n percentError(full_full_output, full_full_labels), 'label':\n labels.flatten()[0], 'output': outputs.flatten()[0]})\n wandb.log(w)\n if w['epoch accuracy'] == 1:\n PATH = f'.accuracynet.pth'\n torch.save(net.state_dict(), PATH)\n if w['epoch PR_AUC'] == 1:\n PATH = f'.PRnet.pth'\n torch.save(net.state_dict(), PATH)\n if w['epoch ROC_AUC'] == 1:\n PATH = f'.ROCnet.pth'\n torch.save(net.state_dict(), PATH)\n full_output = None\n full_full_output = None\n running_loss = 0\n correct = 0\n total = 0\n running_loss = 0\n net.eval()\n correct = 0\n total = 0\n if e % 10 == 9:\n PATH = f'.net.pth'\n torch.save(net.state_dict(), PATH)\n for i, data in enumerate(val_data, 0):\n inputs, labels = data[0], data[1].to(device)\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n running_loss += loss.item()\n total += labels.size(0)\n if full_output == None:\n full_output = outputs\n else:\n full_output = torch.cat((full_output, outputs), 0)\n print(\n f'Validation loss for Epoch [{e + 1}]: {running_loss / 
total}')\n wandlog = {}\n if bestval <= running_loss / total:\n e = e\n else:\n bestepoch = e\n bestval = running_loss / total\n running_loss = 0\n correct = 0\n total = 0\n times.append(time.monotonic() - ftime)\n PATH = f'.net.pth'\n torch.save(net.state_dict(), PATH)\n print('time for epoch: ', times[-1], 'seconds')\n if logpath != None:\n f.write(f'time for epoch: {times[-1]}, seconds')\n e += 1\n print('Finished Training')\n print('average time per epoch: ', sum(times) / len(times), 'seconds')\n if logpath != None:\n f.write('Finished Training')\n f.write(f'average time per epoch: {sum(times) / len(times)} seconds')\n f.close()\n return\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef byGuide(data, val=None, test=None):\n val_guides = val\n if val == None:\n val_guides = ['GGGTGGGGGGAGTTTGCTCCTGG', 'GACCCCCTCCACCCCGCCTCCGG',\n 'GGCCTCCCCAAAGCCTGGCCAGG', 'GAACACAAAGCATAGACTGCGGG']\n test_guides = test\n if test == None:\n test_guides = ['GCAAAACTCAACCCTACCCCAGG', 'GGCCCAGACTGAGCACGTGATGG',\n 'GGGAAAGACCCAGCATCCGTGGG', 'GGAATCCCTTCTGCAGCACCTGG',\n 'GTGAGTGAGTGTGTGCGTGTGGG', 'GATGATGATGCCCCGGGCGTTGG',\n 'GCCGGAGGGGTTTGCACAGAAGG']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['grna_target_sequence'] in val_guides:\n val_set.append(pair)\n elif pair['grna_target_sequence'] in test_guides:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef byTarget(data, train=0.7, val=0.1, test=0.2):\n random.shuffle(data)\n train_set = []\n val_set = []\n test_set = []\n for i in range(len(data)):\n if i <= len(data) * train:\n train_set.append(data[i])\n elif i <= len(data) * (train + val):\n val_set.append(data[i])\n else:\n test_set.append(data[i])\n return [train_set, val_set, test_set]\n\n\ndef byStudy(data, val=None, test=None):\n val_studies = val\n if val == None:\n val_studies = ['Anderson', 'Ran']\n test_studies = test\n if test == None:\n test_studies = ['Kim', 'Tsai', 'Cho']\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1.0, 0.0])\n if pair['study_name'] in val_studies:\n val_set.append(pair)\n elif pair['study_name'] in test_studies:\n test_set.append(pair)\n else:\n train_set.append(pair)\n return [train_set, val_set, test_set]\n\n\ndef one_hot(data, sign='+'):\n sins = None\n sequence = None\n data = data.lower()\n for n in data:\n one_hot = torch.zeros((1, 4))\n if n == 'a':\n one_hot[0][0] = 1\n elif n == 'c':\n one_hot[0][1] = 1\n elif n == 'g':\n one_hot[0][2] = 1\n elif n == 't':\n one_hot[0][3] = 1\n if sins == None:\n sequence = 
copy.deepcopy(one_hot)\n sins = 1\n else:\n sequence = torch.cat((sequence, one_hot), dim=0)\n if list(sequence.size())[0] < 23:\n for i in range(23 - list(sequence.size())[0]):\n sequence = torch.cat((sequence, torch.zeros((1, 4))), dim=0)\n if list(sequence.size())[0] > 23:\n sequence = sequence[:23]\n if sign == '-':\n sequence = torch.flip(sequence, [1])\n return sequence\n\n\ndef dataLoader(file='crisprsql.csv', batch=64, mode='target'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n for t in range(3):\n average_value = list()\n thisdata = list()\n for line in loadData[t]:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor([float(line['cleavage_freq'])])])\n average_value.append(float(line['cleavage_freq']))\n if train == True:\n dl.append(torch.utils.data.DataLoader(thisdata, batch, True,\n num_workers=4 if torch.cuda.is_available() else 4))\n print(thisdata[0][0][0].size())\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(thisdata, batch, False,\n num_workers=4 if torch.cuda.is_available() else 4))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n threes = 
torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\nclass CRISPRDataset(torch.utils.data.Dataset):\n\n def __init__(self, thisdata):\n self.thisdata = thisdata\n\n def __len__(self):\n return len(self.thisdata)\n\n def __getitem__(self, idx):\n item = self.thisdata[idx]\n sample = {'target': torch.squeeze(item[0][1]).unsqueeze_(dim=0),\n 'guide': torch.squeeze(item[0][0]).unsqueeze_(dim=0), 'cfd':\n torch.squeeze(item[1]).unsqueeze_(dim=0)}\n return sample\n\n\ndef collate_fn(batch):\n output = {}\n b = {key: [] for key in batch[0].keys()}\n for i in batch:\n if sum(list(i['cfd'].shape)) > 0 and sum(list(i['target'].shape)\n ) > 0 and sum(list(i['guide'].shape)) > 0:\n for key in i.keys():\n b[key].append(i[key])\n else:\n print('1', sum(list(i['cfd'].shape)), i['cfd'])\n print('2', sum(list(i['target'].shape)), len(i['target'].shape),\n i['target'].tolist())\n print('3', sum(list(i['guide'].shape)), len(i['guide'].shape))\n for key in b.keys():\n if len(b[key]) > 0:\n output[key] = torch.stack(b[key])\n else:\n output[key] = torch.tensor([])\n return output\n\n\n<mask token>\n\n\ndef rankDataLoader(file='crisprsql.csv', batch=64, mode='target'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n ranks = list()\n for line in d:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n ranks.append(float(line['cleavage_freq']))\n ranks.sort()\n for t in range(3):\n df = pd.DataFrame(loadData[t])\n pd.to_numeric(df.cleavage_freq, errors='coerce')\n df.dropna(subset=['cleavage_freq'], inplace=True)\n print(df.head())\n average_value = list()\n thisdata = list()\n for line in 
df.to_dict('records'):\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(ranks.index(float(line['cleavage_freq'])) / len(\n ranks))])\n average_value.append(float(line['cleavage_freq']))\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=1 if torch.\n cuda.is_available() else 0))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=1 if torch\n .cuda.is_available() else 0))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\ndef fullDataLoader(file='augmentcrisprsql.csv', batch=64, mode='target',\n target='rank'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n random.shuffle(d)\n if mode == 'study':\n loadData = byStudy(d)\n elif mode == 'guide':\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n for t in range(3):\n average_value = list()\n thisdata = list()\n q = 0\n for line in loadData[t]:\n if 
line['cleavage_freq'] != '' and float(line['cleavage_freq']\n ) >= 0:\n if target == 'regular':\n label = float(line['cleavage_freq'])\n elif target == 'rank':\n label = [float(line['ranked_cleavage_freq'])]\n else:\n label = [0, 1] if float(line['threshhold_cleavage_freq']\n ) == 0 else [1, 0]\n if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)) > 0 and sum(list(one_hot\n (line['target_sequence'], line['target_strand']).shape)\n ) > 0:\n thisdata.append([[one_hot(line['grna_target_sequence'],\n line['grna_target_strand']), one_hot(line[\n 'target_sequence'], line['target_strand'])], torch.\n tensor(label)])\n average_value.append(label)\n else:\n q += 1\n print(sum(list(torch.tensor([label]).shape)), sum(list(\n one_hot(line['grna_target_sequence'], line[\n 'grna_target_strand']).shape)), sum(list(one_hot(\n line['target_sequence'], line['target_strand']).shape))\n )\n print(q)\n if train == True:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, True, collate_fn=collate_fn, num_workers=4))\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata),\n batch, False, collate_fn=collate_fn, num_workers=4))\n thisdata1 = list()\n for i in range(int(len(thisdata) / batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n if ones == None:\n ones = thisdata[i * batch + j][0][0].unsqueeze_(0\n ).unsqueeze_(0)\n twos = thisdata[i * batch + j][0][1].unsqueeze_(0\n ).unsqueeze_(0)\n threes = thisdata[i * batch + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[i * batch + j][0][0].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n twos = torch.cat((twos, thisdata[i * batch + j][0][1].\n unsqueeze_(0).unsqueeze_(0)), dim=0)\n threes = torch.cat((threes, thisdata[i * batch + j][1].\n unsqueeze_(0)), dim=0)\n thisdata1.append([[ones, twos], threes])\n data.append(thisdata1)\n print('time to load data: ', 
time.monotonic() - ftime, 'seconds')\n return [data, dl]\n\n\n<mask token>\n\n\ndef roc(labels, outputs):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n average_values = dict()\n for i in range(1, 2):\n thislabel = list()\n thisoutput = list()\n pres = 0\n totalpres = 0\n for j in range(len(llabels)):\n if llabels[j] <= 0.01 / i:\n thislabel.append(0)\n else:\n thislabel.append(1)\n if loutputs[j] <= 0.01 / i:\n thisoutput.append(0)\n else:\n thisoutput.append(1)\n if thislabel[-1] == thisoutput[-1]:\n pres += 1\n totalpres += 1\n lr_precision, lr_recall, _ = precision_recall_curve(thislabel,\n thisoutput)\n average_values[0.1 / i] = [roc_auc_score(thislabel, thisoutput),\n auc(lr_recall, lr_precision), pres / totalpres]\n return average_values\n\n\ndef accuracy(labels, outputs, percent=0.1):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n correct = 0\n total = 0\n for i in range(len(llabels)):\n if llabels[i] * (1 - percent) <= loutputs[i] and llabels[i] * (1 +\n percent) >= loutputs[i]:\n correct += 1\n total += 1\n return correct / total\n\n\ndef percentError(outputs, labels):\n return torch.mean(torch.abs(labels - outputs) / labels)\n\n\ndef Test(net, dataset, device, crit, logpath=None):\n net.eval()\n correct = 0\n total = 0\n totalloss = 0\n loss = 0\n with torch.no_grad():\n for i, data in enumerate(dataset, 0):\n inputs, labels = data[0], data[1].to(device)\n outputs = net(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n totalloss += 1\n correct += (predicted == labels).sum().item()\n loss += crit(outputs, labels)\n if logpath != None:\n f = open(logpath, 'w')\n f.write('Accuracy of the network on the 10000 test images: %d %%' %\n (100 * correct / total))\n f.write(f'total: {total} correct: {correct}')\n f.write(f'loss: {loss / totalloss}')\n f.close()\n print('Accuracy of the network on the 10000 test images: %d %%' % (100 *\n correct / total))\n print(f'total: 
{total} correct: {correct}')\n print(f'loss: {loss / totalloss}')\n return 100 * correct / total\n\n\ndef getAllStudy():\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:\n try:\n alls[row['study_name']].add(row['grna_target_sequence'])\n except KeyError:\n alls[row['study_name']] = set(row['grna_target_sequence'])\n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n\n\ndef getallGuide():\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T']:\n try:\n alls[row['grna_target_sequence']].add(row[\n 'target_sequence'])\n except KeyError:\n alls[row['grna_target_sequence']] = set(row[\n 'target_sequence'])\n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n\n\ndef aboveandbelow(threshold):\n with open('crisprsql.csv') as f:\n data = csv.DictReader(f)\n alls = dict()\n above = 0\n total = 0\n for row in data:\n if row['grna_target_sequence'] not in ['C', 'G', 'A', 'T'] and row[\n 'cleavage_freq'] != '':\n if float(row['cleavage_freq']) > threshold:\n above += 1\n total += 1\n print(f'Above: {above / total}%. 
Below: {(total - above) / total}')\n\n\ndef NewTrain(epochs, optim, crit, batch_per, train_data, val_data, net,\n device, optim_time=None, logpath=None):\n net.to(device)\n criterion = crit\n optimizer = optim\n full_full_labels = None\n for i, data in enumerate(train_data, 0):\n if full_full_labels == None:\n full_full_labels = data[1].to(device)\n else:\n full_full_labels = torch.cat((full_full_labels, data[1].to(\n device)), 0)\n full_val_labels = None\n for i, data in enumerate(val_data, 0):\n if full_val_labels == None:\n full_val_labels = data[1].to(device)\n else:\n full_val_labels = torch.cat((full_val_labels, data[1].to(device\n )), 0)\n print('begin training')\n if logpath != None:\n f = open(logpath, 'w')\n best = 15\n bestval = 15\n bestepoch = 0\n e = 0\n times = list()\n for q in optim_time:\n optimizer = q[1]\n print(q[0])\n for epoch in range(q[0]):\n ftime = time.monotonic()\n random.shuffle(train_data)\n correct = 0\n total = 0\n running_loss = 0.0\n net.train()\n full_output = None\n full_labels = None\n full_full_output = None\n for i, data in enumerate(train_data, 0):\n inputs, labels = data[0], data[1].to(device)\n optimizer.zero_grad()\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n running_loss += loss.item()\n if full_output == None:\n full_output = outputs\n else:\n full_output = torch.cat((full_output, outputs), 0)\n if full_labels == None:\n full_labels = labels\n else:\n full_labels = torch.cat((full_labels, labels), 0)\n w = {'loss': loss.item(), 'accuracy': accuracy(labels,\n outputs), 'percent error': percentError(outputs, labels)}\n wandb.log(w)\n if i % batch_per == batch_per - 1:\n print('[%d, %5d] loss: %.3f' % (e + 1, i + 1, \n running_loss / batch_per))\n wl = roc(full_labels, full_output)\n wandlog = {}\n for q in wl:\n wandlog[f'midepoch ROC_AUC'] = wl[q][0]\n 
wandlog[f'midepoch PR_AUC'] = wl[q][1]\n wandlog[f'midepoch threshhold accuracy'] = wl[q][2]\n w.update({'midepoch loss': loss.item(),\n 'midepoch accuracy': accuracy(labels, outputs),\n 'midepoch percent error': percentError(outputs,\n labels)})\n wandb.log(w)\n wandb.log(wandlog)\n if full_full_output == None:\n full_full_output = full_output\n else:\n full_full_output = torch.cat((full_full_output,\n full_output), 0)\n full_output = None\n full_labels = None\n running_loss = 0\n correct = 0\n total = 0\n if full_full_output == None:\n full_full_output = full_output\n else:\n full_full_output = torch.cat((full_full_output, full_output), 0\n )\n wl = roc(full_full_labels, full_full_output)\n w = {}\n for q in wl:\n w[f'epoch ROC_AUC'] = wl[q][0]\n w[f'epoch PR_AUC'] = wl[q][1]\n w[f'epoch threshhold accuracy'] = wl[q][2]\n w.update({'epoch loss': loss.item(), 'epoch accuracy': accuracy\n (full_full_labels, full_full_output), 'epoch percent error':\n percentError(full_full_output, full_full_labels), 'label':\n labels.flatten()[0], 'output': outputs.flatten()[0]})\n wandb.log(w)\n if w['epoch accuracy'] == 1:\n PATH = f'.accuracynet.pth'\n torch.save(net.state_dict(), PATH)\n if w['epoch PR_AUC'] == 1:\n PATH = f'.PRnet.pth'\n torch.save(net.state_dict(), PATH)\n if w['epoch ROC_AUC'] == 1:\n PATH = f'.ROCnet.pth'\n torch.save(net.state_dict(), PATH)\n full_output = None\n full_full_output = None\n running_loss = 0\n correct = 0\n total = 0\n running_loss = 0\n net.eval()\n correct = 0\n total = 0\n if e % 10 == 9:\n PATH = f'.net.pth'\n torch.save(net.state_dict(), PATH)\n for i, data in enumerate(val_data, 0):\n inputs, labels = data[0], data[1].to(device)\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n running_loss += loss.item()\n total += labels.size(0)\n if full_output == None:\n full_output = outputs\n else:\n full_output = torch.cat((full_output, outputs), 0)\n print(\n f'Validation loss for Epoch [{e + 1}]: {running_loss / 
total}')\n wandlog = {}\n if bestval <= running_loss / total:\n e = e\n else:\n bestepoch = e\n bestval = running_loss / total\n running_loss = 0\n correct = 0\n total = 0\n times.append(time.monotonic() - ftime)\n PATH = f'.net.pth'\n torch.save(net.state_dict(), PATH)\n print('time for epoch: ', times[-1], 'seconds')\n if logpath != None:\n f.write(f'time for epoch: {times[-1]}, seconds')\n e += 1\n print('Finished Training')\n print('average time per epoch: ', sum(times) / len(times), 'seconds')\n if logpath != None:\n f.write('Finished Training')\n f.write(f'average time per epoch: {sum(times) / len(times)} seconds')\n f.close()\n return\n\n\ndef compute_dataframe(df: pd.DataFrame, checkpoint_path):\n model = checkpoint_path\n targets, targets_s, guides, guides_s = df.target_sequence.tolist(\n ), df.target_strand.tolist(), df.grna_target_sequence.tolist(\n ), df.grna_target_strand.tolist()\n preds = []\n for guide, target, guide_s, target_s in zip(guides, targets, guides_s,\n targets_s):\n pred = model([one_hot(guide, guide_s), one_hot(target, target_s)])\n preds.append(pred.item())\n df['pred'] = preds\n return df\n",
"step-5": "import random\nimport copy\nrandom.seed(42)\nimport csv\nimport torch\nimport time\nimport statistics\nimport wandb\nfrom model import Net, LinearRegression, LogisticRegression\n\ndef byGuide(data, val=None, test=None):\n val_guides = val\n if val == None:\n val_guides = [\n \"GGGTGGGGGGAGTTTGCTCCTGG\",\n \"GACCCCCTCCACCCCGCCTCCGG\",\n \"GGCCTCCCCAAAGCCTGGCCAGG\",\n \"GAACACAAAGCATAGACTGCGGG\"\n \n ]\n test_guides = test\n if test==None:\n test_guides = [\n \"GCAAAACTCAACCCTACCCCAGG\",\n \"GGCCCAGACTGAGCACGTGATGG\",\n \"GGGAAAGACCCAGCATCCGTGGG\",\n \"GGAATCCCTTCTGCAGCACCTGG\",\n \"GTGAGTGAGTGTGTGCGTGTGGG\",\n \"GATGATGATGCCCCGGGCGTTGG\",\n \"GCCGGAGGGGTTTGCACAGAAGG\"\n ]\n \n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1., 0.])\n if pair['grna_target_sequence'] in val_guides:\n val_set.append(pair)\n elif pair['grna_target_sequence'] in test_guides:\n test_set.append(pair)\n else: \n train_set.append(pair)\n return [train_set, val_set, test_set] \n\ndef byTarget(data, train=.7, val=.1, test=.2):\n random.shuffle(data)\n train_set = []\n val_set = []\n test_set = []\n for i in range(len(data)):\n if i <= len(data) * train:\n train_set.append(data[i])\n elif i <= len(data) * (train + val):\n val_set.append(data[i])\n else:\n test_set.append(data[i])\n return [train_set, val_set, test_set] \n\n\n\n\ndef byStudy(data, val=None, test=None):\n val_studies = val\n if val == None:\n val_studies = [\n 'Anderson',\n 'Ran',\n \n ]\n test_studies = test\n if test==None:\n test_studies = [\n 'Kim',\n 'Tsai',\n 'Cho',\n ]\n train_set = []\n val_set = []\n test_set = []\n for pair in data:\n pair['off'] = torch.tensor([1., 0.])\n if pair['study_name'] in val_studies:\n val_set.append(pair)\n elif pair['study_name'] in test_studies:\n test_set.append(pair)\n else: \n train_set.append(pair)\n return [train_set, val_set, test_set] \n\n\n\ndef one_hot(data, sign='+'):\n sins = None\n sequence = None\n data = 
data.lower()\n for n in data:\n \n one_hot = torch.zeros((1, 4))\n if n =='a':\n one_hot[0][0] = 1\n elif n == 'c':\n one_hot[0][1] = 1\n elif n == 'g':\n one_hot[0][2] = 1\n elif n == 't':\n one_hot[0][3] = 1\n if sins == None:\n sequence = copy.deepcopy(one_hot)\n sins = 1\n else:\n sequence = torch.cat((sequence, one_hot), dim=0)\n if list(sequence.size())[0] < 23:\n for i in range(23 - list(sequence.size())[0]):\n sequence = torch.cat((sequence, torch.zeros((1, 4))), dim=0) \n if list(sequence.size())[0] > 23: \n sequence = sequence[:23]\n if sign == '-':\n sequence = torch.flip(sequence, [1]) \n return sequence \n\n \n# import numpy as np\n\ndef dataLoader(file=\"crisprsql.csv\", batch=64, mode=\"target\"):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n if mode == \"study\":\n loadData = byStudy(d)\n elif mode == \"guide\":\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n for t in range(3):\n average_value = list()\n thisdata = list()\n for line in loadData[t]:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n thisdata.append([\n [one_hot(line['grna_target_sequence'], line['grna_target_strand']), \n one_hot(line['target_sequence'], line[\"target_strand\"])],\n torch.tensor([float(line['cleavage_freq'])])])\n average_value.append(float(line['cleavage_freq'])) \n # if line \n\n\n # mode = 0\n # zero = 0\n # for p in average_value:\n # if p == statistics.mode(average_value):\n # mode+=1\n # if p <0:\n # zero+=1 \n # print(f\"average CFD of {len(average_value)} datapoints in set {t + 1}: {sum(average_value)/len(average_value)}.\\nMedian: {statistics.median(average_value)}.\\nMode: {statistics.mode(average_value)} with {mode} datapoint.\\nstandard deviation: {statistics.pstdev(average_value)}.\\nlowest value: {min(average_value)}.\\nHighest value: {max(average_value)}\\n{zero} datapoints below zero\\n\\n\")\n if train == True:\n 
dl.append(torch.utils.data.DataLoader(thisdata, batch, True, num_workers=(4 if torch.cuda.is_available() else 4)))\n print(thisdata[0][0][0].size())\n train = False\n else:\n dl.append(torch.utils.data.DataLoader(thisdata, batch, False, num_workers=(4 if torch.cuda.is_available() else 4)))\n \n thisdata1 = list() \n for i in range(int(len(thisdata)/batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n \n if ones == None:\n ones = thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)\n twos = thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)\n threes = thisdata[(i * batch) + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)), dim=0) \n twos = torch.cat((twos, thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)), dim=0) \n threes = torch.cat((threes, thisdata[(i * batch) + j][1].unsqueeze_(0)), dim=0) \n \n thisdata1.append([[ones, twos], threes]) \n\n\n data.append(thisdata1) \n \n print('time to load data: ', time.monotonic() - ftime, 'seconds') \n \n\n return [data, dl]\n\n# from scipy.stats import rankdata\n\nclass CRISPRDataset(torch.utils.data.Dataset):\n def __init__(self, thisdata):\n self.thisdata = thisdata\n \n def __len__(self):\n return len(self.thisdata)\n\n def __getitem__(self, idx):\n item = self.thisdata[idx]\n sample = {\n # (23, 4)\n 'target': torch.squeeze(item[0][1]).unsqueeze_(dim=0),\n 'guide': torch.squeeze(item[0][0]).unsqueeze_(dim=0),\n # (1)\n 'cfd': torch.squeeze(item[1]).unsqueeze_(dim=0)\n }\n return sample\n\n \ndef collate_fn(batch):\n # (256, 23, 4)\n # (256, 1)\n # print(sum(list(batch[0]['cfd'].shape)), sum(list(batch[0]['target'].shape, sum(list(batch[0]['guide'].shape)))))\n\n output = {}\n\n b = {key: [] for key in batch[0].keys()}\n for i in batch:\n if sum(list(i['cfd'].shape)) > 0 and sum(list(i['target'].shape)) > 0 and sum(list(i['guide'].shape)) > 0 :\n for key in i.keys():\n b[key].append(i[key])\n else:\n 
print('1', sum(list(i['cfd'].shape)), i['cfd'])\n print('2', sum(list(i['target'].shape)), len(i['target'].shape), i['target'].tolist())\n print('3', sum(list(i['guide'].shape)), len(i['guide'].shape))\n\n for key in b.keys():\n # print(b[key])s\n if len(b[key]) > 0:\n output[key] = torch.stack(b[key])\n else:\n output[key] = torch.tensor([])\n\n\n\n\n\n\n # output = {\n # key: torch.stack([batch[i][key] for i in range(len(batch)) \\\n # if all( len(batch[i][k].shape) > 0 for k in batch[0].keys() )\n # ])\n # for key in batch[0].keys()\n # }\n\n return output\nimport pandas as pd\n\ndef rankDataLoader(file=\"crisprsql.csv\", batch=64, mode=\"target\"):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n if mode == \"study\":\n loadData = byStudy(d)\n elif mode == \"guide\":\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n ranks = list()\n for line in d:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n ranks.append(float(line['cleavage_freq']))\n ranks.sort()\n for t in range(3):\n \n df = pd.DataFrame(loadData[t])\n\n # df.drop(df.columns.difference(['cleavage_freq']), 1, inplace=True)\n # pd.to_numeric(df['cleavage_freq']\n pd.to_numeric(df.cleavage_freq, errors='coerce')\n # cleave = df.cleavage_freq\n \n # df_ = pd.DataFrame(loadData[t]).drop(['cleavage_freq'], 1, inplace=True)\n # df_.join(cleave)\n df.dropna(subset=['cleavage_freq'], inplace=True)\n print(df.head())\n average_value = list()\n thisdata = list()\n for line in df.to_dict(\"records\"):\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n thisdata.append([\n [one_hot(line['grna_target_sequence'], line['grna_target_strand']), \n one_hot(line['target_sequence'], line[\"target_strand\"])],\n torch.tensor(ranks.index(float(line['cleavage_freq'])) / len(ranks))])\n average_value.append(float(line['cleavage_freq'])) \n # if line \n\n\n # mode = 0\n # zero = 0\n # for p in 
average_value:\n # if p == statistics.mode(average_value):\n # mode+=1\n # if p <0:\n # zero+=1 \n # print(f\"average CFD of {len(average_value)} datapoints in set {t + 1}: {sum(average_value)/len(average_value)}.\\nMedian: {statistics.median(average_value)}.\\nMode: {statistics.mode(average_value)} with {mode} datapoint.\\nstandard deviation: {statistics.pstdev(average_value)}.\\nlowest value: {min(average_value)}.\\nHighest value: {max(average_value)}\\n{zero} datapoints below zero\\n\\n\")\n if train == True:\n # dl.append(torch.utils.data.DataLoader(thisdata, batch, True, num_workers=(1 if torch.cuda.is_available() else 0)))\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, True, collate_fn=collate_fn, num_workers=(1 if torch.cuda.is_available() else 0)))\n \n # print(thisdata[0][0][0])\n train = False\n else:\n # dl.append(torch.utils.data.DataLoader(thisdata, batch, False, num_workers=(1 if torch.cuda.is_available() else 0)))\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, False, collate_fn=collate_fn, num_workers=(1 if torch.cuda.is_available() else 0)))\n # import pdb; pdb.set_trace()\n thisdata1 = list() \n for i in range(int(len(thisdata)/batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n \n if ones == None:\n ones = thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)\n twos = thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)\n threes = thisdata[(i * batch) + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)), dim=0) \n twos = torch.cat((twos, thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)), dim=0) \n threes = torch.cat((threes, thisdata[(i * batch) + j][1].unsqueeze_(0)), dim=0) \n \n thisdata1.append([[ones, twos], threes]) \n\n\n data.append(thisdata1) \n \n print('time to load data: ', time.monotonic() - ftime, 'seconds') \n \n return [data, dl]\n\n\n\n\n\n\n\ndef 
fullDataLoader(file=\"augmentcrisprsql.csv\", batch=64, mode=\"target\", target='rank'):\n ftime = time.monotonic()\n with open(file) as f:\n d = list(csv.DictReader(f))\n random.shuffle(d)\n if mode == \"study\":\n loadData = byStudy(d)\n elif mode == \"guide\":\n loadData = byGuide(d)\n else:\n loadData = byTarget(d)\n data = list()\n dl = list()\n train = True\n for t in range(3):\n \n average_value = list()\n thisdata = list()\n q = 0\n for line in loadData[t]:\n if line['cleavage_freq'] != '' and float(line['cleavage_freq']) >= 0:\n\n if target == 'regular':\n label = float(line['cleavage_freq'])\n elif target == 'rank':\n label = [float(line['ranked_cleavage_freq'])]\n else:\n label = [0, 1] if float(line['threshhold_cleavage_freq']) == 0 else [1, 0]\n\n if sum(list(torch.tensor([label]).shape)) > 0 and sum(list(one_hot(line['grna_target_sequence'], line['grna_target_strand']).shape)) > 0 and sum(list(one_hot(line['target_sequence'], line[\"target_strand\"]).shape)) > 0:\n thisdata.append([\n [one_hot(line['grna_target_sequence'], line['grna_target_strand']), \n one_hot(line['target_sequence'], line[\"target_strand\"])],\n torch.tensor(label)])\n average_value.append(label)\n # print(sum(list(torch.tensor([label]).shape)), sum(list(one_hot(line['grna_target_sequence'], line['grna_target_strand']).shape)), sum(list(one_hot(line['target_sequence'], line[\"target_strand\"]).shape)))\n \n else:\n q+=1\n print(sum(list(torch.tensor([label]).shape)), sum(list(one_hot(line['grna_target_sequence'], line['grna_target_strand']).shape)), sum(list(one_hot(line['target_sequence'], line[\"target_strand\"]).shape)))\n # print(torch.tensor([label), len(torch.tensor([label]).shape))\n print(q)\n # if line \n\n\n # mode = 0\n # zero = 0\n # for p in average_value:\n # if p == statistics.mode(average_value):\n # mode+=1\n # if p <0:\n # zero+=1 \n # print(f\"average CFD of {len(average_value)} datapoints in set {t + 1}: {sum(average_value)/len(average_value)}.\\nMedian: 
{statistics.median(average_value)}.\\nMode: {statistics.mode(average_value)} with {mode} datapoint.\\nstandard deviation: {statistics.pstdev(average_value)}.\\nlowest value: {min(average_value)}.\\nHighest value: {max(average_value)}\\n{zero} datapoints below zero\\n\\n\")\n if train == True:\n # dl.append(torch.utils.data.DataLoader(thisdata, batch, True, num_workers=(1 if torch.cuda.is_available() else 0)))\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, True, collate_fn=collate_fn, num_workers=4))\n \n # print(thisdata[0][0][0])\n train = False\n else:\n # dl.append(torch.utils.data.DataLoader(thisdata, batch, False, num_workers=(1 if torch.cuda.is_available() else 0)))\n dl.append(torch.utils.data.DataLoader(CRISPRDataset(thisdata), batch, False, collate_fn=collate_fn, num_workers=4))\n # import pdb; pdb.set_trace()\n thisdata1 = list() \n for i in range(int(len(thisdata)/batch)):\n ones = None\n twos = None\n threes = None\n for j in range(batch):\n \n if ones == None:\n ones = thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)\n twos = thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)\n threes = thisdata[(i * batch) + j][1].unsqueeze_(0)\n else:\n ones = torch.cat((ones, thisdata[(i * batch) + j][0][0].unsqueeze_(0).unsqueeze_(0)), dim=0) \n twos = torch.cat((twos, thisdata[(i * batch) + j][0][1].unsqueeze_(0).unsqueeze_(0)), dim=0) \n threes = torch.cat((threes, thisdata[(i * batch) + j][1].unsqueeze_(0)), dim=0) \n \n thisdata1.append([[ones, twos], threes]) \n\n\n data.append(thisdata1) \n\n print('time to load data: ', time.monotonic() - ftime, 'seconds') \n\n return [data, dl]\n\n\n\n\n\n\n\n\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import auc\ndef roc(labels, outputs):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n 
average_values = dict()\n # print(len(llabels), len(loutputs))\n for i in range(1, 2):\n thislabel = list()\n thisoutput = list()\n pres = 0\n totalpres = 0\n for j in range(len(llabels)):\n\n if llabels[j] <= .01 / i:\n thislabel.append(0)\n else:\n thislabel.append(1) \n if loutputs[j] <= .01 / i:\n thisoutput.append(0)\n else:\n thisoutput.append(1)\n if thislabel[-1] == thisoutput[-1]:\n pres += 1\n totalpres +=1 \n lr_precision, lr_recall, _ = precision_recall_curve(thislabel, thisoutput)\n average_values[.1/i] = [roc_auc_score(thislabel, thisoutput), auc(lr_recall, lr_precision), pres/totalpres]\n return average_values \n\n\ndef accuracy(labels, outputs, percent=.10):\n llabels = labels.flatten().tolist()\n loutputs = outputs.flatten().tolist()\n correct = 0\n total = 0\n # print(llabels)\n for i in range(len(llabels)):\n if llabels[i] * (1 - percent) <= loutputs[i] and llabels[i] * (1 + percent) >= loutputs[i]:\n correct +=1\n total += 1\n\n return correct / total \n\n\ndef percentError(outputs, labels):\n return torch.mean(torch.abs(labels - outputs) / labels)\n\n\n \n\n\n \n\n\ndef Test(net, dataset, device, crit, logpath=None):\n \n net.eval()\n correct = 0\n total = 0\n totalloss = 0\n loss = 0\n with torch.no_grad():\n for i, data in enumerate(dataset, 0):\n inputs, labels = data[0], data[1].to(device) \n outputs = net(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n totalloss+=1\n correct += (predicted == labels).sum().item()\n loss+=crit(outputs, labels)\n if logpath!= None:\n f = open(logpath, 'w')\n f.write('Accuracy of the network on the 10000 test images: %d %%' % (\n 100 * correct / total))\n f.write(f\"total: {total} correct: {correct}\")\n f.write(f'loss: {loss/totalloss}')\n f.close()\n print('Accuracy of the network on the 10000 test images: %d %%' % (\n 100 * correct / total))\n print(f\"total: {total} correct: {correct}\") \n print(f'loss: {loss/totalloss}')\n return 100 * correct / total \n\ndef 
getAllStudy():\n with open(\"crisprsql.csv\") as f:\n data = csv.DictReader(f)\n alls = dict()\n for row in data:\n if row['grna_target_sequence'] not in [\"C\", 'G', 'A', \"T\"]:\n try:\n alls[row['study_name']].add(row['grna_target_sequence']) \n except KeyError:\n alls[row[\"study_name\"]] = set(row['grna_target_sequence']) \n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n \n\ndef getallGuide():\n with open(\"crisprsql.csv\") as f:\n data = csv.DictReader(f)\n alls = dict()\n\n for row in data:\n if row['grna_target_sequence'] not in [\"C\", 'G', 'A', \"T\"]:\n try:\n alls[row['grna_target_sequence']].add(row['target_sequence']) \n except KeyError:\n alls[row[\"grna_target_sequence\"]] = set(row['target_sequence']) \n for r in alls:\n print(r)\n print(alls[r])\n print(len(alls[r]))\n \n\ndef aboveandbelow(threshold):\n with open(\"crisprsql.csv\") as f:\n data = csv.DictReader(f)\n alls = dict()\n above = 0\n total = 0\n for row in data:\n if row['grna_target_sequence'] not in [\"C\", 'G', 'A', \"T\"] and row['cleavage_freq'] != '':\n if float(row['cleavage_freq']) > threshold:\n above+=1\n total+=1\n \n\n print(f'Above: {above / total}%. 
Below: {(total - above) / total}')\n\n\n\n\n\n\n\ndef NewTrain(epochs, optim, crit, batch_per, train_data, val_data, net, device, optim_time=None, logpath=None):\n net.to(device)\n #def optim, loss, and init graph data\n criterion = crit\n optimizer = optim\n # get all labels for ROC\n full_full_labels = None\n for i, data in enumerate(train_data, 0):\n if full_full_labels == None:\n full_full_labels = data[1].to(device) \n else:\n full_full_labels = torch.cat((full_full_labels, data[1].to(device)), 0) \n full_val_labels = None \n for i, data in enumerate(val_data, 0):\n if full_val_labels == None:\n full_val_labels = data[1].to(device) \n else:\n full_val_labels = torch.cat((full_val_labels, data[1].to(device)), 0) \n print(\"begin training\")\n if logpath!= None:\n f = open(logpath, 'w')\n #these go down, and random loss is ~2.303 so 15 will be replaced\n best = 15\n bestval = 15\n bestepoch = 0\n e = 0\n # begin training loop, larget loop is for lr scedule\n times = list()\n # bestnet = LogisticRegression()\n # bestnet.load_state_dict(copy.deepcopy(net.state_dict()))\n for q in optim_time:\n optimizer = q[1]\n print(q[0])\n # net.load_state_dict(copy.deepcopy(bestnet.state_dict())\n # print(\n # 'params', [p for p in net.parameters()], \n # '\\ngrads', [p.grad for p in net.parameters()] \n # )\n # epoch loop\n for epoch in range(q[0]): # loop over the dataset multiple times\n ftime = time.monotonic()\n random.shuffle(train_data)\n correct = 0\n total = 0\n running_loss = 0.0\n # train mode\n net.train()\n full_output = None\n full_labels = None\n full_full_output = None\n \n for i, data in enumerate(train_data, 0):\n \n # train step\n inputs, labels = data[0], data[1].to(device) \n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n # t = time.monotonic()\n outputs = net(inputs)\n # print(time.monotonic - t, \" seconds for 512 outputs\")\n loss = criterion(outputs, labels)\n loss.backward()\n # import pdb; 
pdb.set_trace()\n # things to look at:\n # - loss\n # - parameters\n # - inputs\n # - grads\n # if e % 300 == 299:\n\n # print(\n # 'loss', loss, \n # # '\\ninputs', inputs,\n # '\\nlabels', labels,\n # '\\noutputs', outputs\n # )\n \n optimizer.step()\n _, predicted = torch.max(outputs.data, 1)\n total+= labels.size(0) \n correct += (predicted == labels).sum().item()\n # print()\n \n running_loss += loss.item()\n if full_output == None:\n full_output = outputs\n else:\n full_output = torch.cat((full_output, outputs), 0)\n\n if full_labels == None:\n full_labels = labels\n else:\n full_labels = torch.cat((full_labels, labels), 0) \n # w = {f'output {i}': outputs.flatten()[i] for i in range(outputs.flatten().size(0))}\n # w.update({\n # f'label {i}': labels.flatten()[i] for i in range(labels.flatten().size(0))\n # })\n w = ({'loss': loss.item(), \n 'accuracy': accuracy(labels, outputs),\n 'percent error': percentError(outputs, labels)})\n wandb.log(\n # {\n # 'loss': loss.item(), \n # # 'params': [p for p in net.parameters()], \n # # 'grads': [p.grad for p in net.parameters()], \n # # 'inputs': inputs,\n # f'label {i}': labels.flatten()[i] for i in len(labels.flatten().size(0)),\n # f'output {i}': outputs.flatten()[i] for i in len(outputs.flatten().size(0)),\n # 'accuracy': accuracy(labels, outputs)\n # }\n w\n )\n # print statistics\n if i % batch_per == batch_per - 1: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (e + 1, i + 1, running_loss / batch_per))\n # best = min(best, running_loss / batch_per)\n \n # print('Accuracy of the network on the ' + str(batch_per) + 'th update: %d %%' % (\n # 100 * correct / total))\n \n wl = roc(full_labels, full_output)\n wandlog = {}\n for q in wl:\n wandlog[f\"midepoch ROC_AUC\"] = wl[q][0]\n wandlog[f\"midepoch PR_AUC\"] = wl[q][1]\n wandlog[f\"midepoch threshhold accuracy\"] = wl[q][2]\n\n\n\n # wandlog.update({\n # \"LOSS\": running_loss / batch_per, \n # \"TYPE\": \"TRAIN\", \n # 'EPOCH': e+1, \n # 
'UPDATE': (e*len(train_data)) + i + 1})\n w.update({'midepoch loss': loss.item(), \n 'midepoch accuracy': accuracy(labels, outputs),\n 'midepoch percent error': percentError(outputs, labels)})\n wandb.log(\n # {\n # 'loss': loss.item(), \n # # 'params': [p for p in net.parameters()], \n # # 'grads': [p.grad for p in net.parameters()], \n # # 'inputs': inputs,\n # f'label {i}': labels.flatten()[i] for i in len(labels.flatten().size(0)),\n # f'output {i}': outputs.flatten()[i] for i in len(outputs.flatten().size(0)),\n # 'accuracy': accuracy(labels, outputs)\n # }\n w\n )\n wandb.log(wandlog)\n if full_full_output == None:\n full_full_output = full_output\n else:\n full_full_output = torch.cat((full_full_output, full_output), 0) \n \n full_output = None\n full_labels = None\n\n\n running_loss = 0\n correct = 0\n total = 0\n # print('[%d] loss: %.20f' %\n # (epoch + 1, running_loss / total))\n # if logpath != None:\n # f.write('[%d] loss: %.20f' %\n # (epoch + 1, running_loss / total)) \n if full_full_output == None:\n full_full_output = full_output\n else:\n full_full_output = torch.cat((full_full_output, full_output), 0) \n # ROC is commented out when training on 10 samples\n wl = roc(full_full_labels, full_full_output)\n w = {}\n\n for q in wl:\n w[f\"epoch ROC_AUC\"] = wl[q][0]\n w[f\"epoch PR_AUC\"] = wl[q][1]\n w[f\"epoch threshhold accuracy\"] = wl[q][2]\n # wandlog.update({\n # \"LOSS\": running_loss / batch_per, \n # \"TYPE\": \"TRAIN\", \n # 'EPOCH': e+1, \n # 'UPDATE': (e + 1) *len(train_data)}) \n w.update({'epoch loss': loss.item(), \n 'epoch accuracy': accuracy(full_full_labels, full_full_output),\n 'epoch percent error': percentError(full_full_output, full_full_labels),\n 'label': labels.flatten()[0],\n 'output': outputs.flatten()[0]})\n wandb.log(\n # {\n # 'loss': loss.item(), \n # # 'params': [p for p in net.parameters()], \n # # 'grads': [p.grad for p in net.parameters()], \n # # 'inputs': inputs,\n # f'label {i}': labels.flatten()[i] for i in 
len(labels.flatten().size(0)),\n # f'output {i}': outputs.flatten()[i] for i in len(outputs.flatten().size(0)),\n # 'accuracy': accuracy(labels, outputs)\n # }\n w\n ) \n if w['epoch accuracy'] == 1:\n\n PATH = f'.accuracynet.pth'\n torch.save(net.state_dict(), PATH)\n if w['epoch PR_AUC'] == 1:\n\n PATH = f'.PRnet.pth'\n torch.save(net.state_dict(), PATH)\n if w['epoch ROC_AUC'] == 1:\n\n PATH = f'.ROCnet.pth'\n torch.save(net.state_dict(), PATH)\n\n\n # wandb.log(wandlog) \n\n full_output = None\n full_full_output = None\n running_loss = 0\n correct = 0\n total = 0 \n running_loss = 0\n net.eval()\n correct = 0\n total = 0\n if e % 10 == 9:\n PATH = f'.net.pth'\n torch.save(net.state_dict(), PATH)\n #check val set\n for i, data in enumerate(val_data, 0):\n inputs, labels = data[0], data[1].to(device) \n outputs = net(inputs)\n loss = criterion(outputs, labels) \n loss.backward()\n running_loss += loss.item()\n total+= labels.size(0) \n if full_output == None:\n full_output = outputs\n else:\n full_output = torch.cat((full_output, outputs), 0) \n # if e % 300 == 299:\n print(f'Validation loss for Epoch [{e +1}]: {running_loss/total}') \n # if logpath != None:\n # f.write(f'Validation loss for Epoch [{epoch}]: {running_loss/total}') \n \n # wl = roc(full_val_labels, full_output)\n wandlog = {}\n # for q in wl:\n # wandlog[f\"{q} ROC_AUC\"] = wl[q][0]\n # wandlog[f\"{q} PR_AUC\"] = wl[q][1]\n # wandlog[f\"{q} ACCURACY\"] = wl[q][2]\n # wandlog.update({\n # \"LOSS\": running_loss / len(val_data), \n # \"TYPE\": \"VAL\", \n # 'EPOCH': e+1, \n # 'UPDATE': (e + 1)*len(train_data)}) \n # wandb.log(wandlog) \n # best = min(best, running_loss / total)\n # early stop just goes to the next lr change checkpoint\n \n if bestval <= running_loss / total:\n # if epoch >= 5:\n # print('Early Stop')\n # print(f\"Best Validation loss: {bestval}\")\n # print(f\"Current Validation loss: {running_loss / total}\")\n \n e = e\n # break\n # continue\n # return\n else:\n # 
bestnet.load_state_dict(copy.deepcopy(net.state_dict()))\n bestepoch = e\n bestval = running_loss / total\n\n running_loss = 0\n correct = 0\n total = 0\n times.append(time.monotonic() - ftime)\n PATH = f'.net.pth'\n torch.save(net.state_dict(), PATH)\n # if e % 300 == 299:\n print('time for epoch: ', times[-1], 'seconds')\n if logpath != None:\n f.write(f'time for epoch: {times[-1]}, seconds') \n e+=1\n \n\n\n\n\n # finish training. in future dont plot and save here just return them\n print('Finished Training')\n print('average time per epoch: ', sum(times)/len(times), 'seconds')\n if logpath != None:\n f.write('Finished Training')\n f.write(f'average time per epoch: {sum(times)/len(times)} seconds')\n f.close()\n \n return \n\n\n# def compute_dataframe(df: pd.DataFrame, checkpoint_path: str):\n# model = LogisticRegression().load_state_dict(torch.load(checkpoint_path, map_location=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")))\n# targets, targets_s, guides, guides_s = df.target_sequence.tolist(), df.target_strand.tolist(), df.grna_target_sequence.tolist(), df.grna_target_strand.tolist()\n# preds = []\n# for guide, target, guide_s, target_s in zip(guides, targets, guides_s, targets_s):\n# pred = model([one_hot(guide, guide_s), one_hot(target, target_s)])\n# preds.append(pred.item())\n# df['pred'] = preds\n# return df\n\ndef compute_dataframe(df: pd.DataFrame, checkpoint_path):\n model = checkpoint_path\n targets, targets_s, guides, guides_s = df.target_sequence.tolist(), df.target_strand.tolist(), df.grna_target_sequence.tolist(), df.grna_target_strand.tolist()\n preds = []\n for guide, target, guide_s, target_s in zip(guides, targets, guides_s, targets_s):\n pred = model([one_hot(guide, guide_s), one_hot(target, target_s)])\n preds.append(pred.item())\n df['pred'] = preds\n return df",
"step-ids": [
15,
16,
19,
21,
24
]
}
|
[
15,
16,
19,
21,
24
] |
# In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
# The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
# What is the greatest product of four adjacent numbers in the same direction
# (up, down, left, right, or diagonally) in the 20×20 grid?
import numpy as np


def greatest_adjacent_product(grid, run=4):
    """Return the greatest product of `run` adjacent entries of `grid`.

    Adjacency is horizontal, vertical, or along either diagonal.

    Fixes three bugs in the original scan:
    - horizontal: the row loop stopped `run - 1` rows early, so the last
      rows were never scanned horizontally;
    - vertical: the column loop stopped `run - 1` columns early, so the
      last columns were never scanned vertically;
    - anti-diagonal: the row loop started at 2, so index ``i - 3 == -1``
      wrapped around to the LAST row (spurious products), and the true
      last starting row was never visited.

    Parameters:
        grid: 2-D numpy array of numbers.
        run:  length of the window of adjacent numbers (default 4).

    Returns:
        The maximum product as a float (0.0 for an all-nonpositive or
        too-small grid, matching the original's 0 initialisation).
    """
    rows, cols = grid.shape
    best = 0.0
    # Horizontal runs: every row, every window of `run` columns.
    for r in range(rows):
        for c in range(cols - run + 1):
            best = max(best, float(np.prod(grid[r, c:c + run])))
    # Vertical runs: every window of `run` rows, every column.
    for r in range(rows - run + 1):
        for c in range(cols):
            best = max(best, float(np.prod(grid[r:r + run, c])))
    # Down-right diagonals.
    for r in range(rows - run + 1):
        for c in range(cols - run + 1):
            best = max(best, float(np.prod([grid[r + k, c + k] for k in range(run)])))
    # Up-right (anti-) diagonals: the start row must leave `run - 1` rows above it.
    for r in range(run - 1, rows):
        for c in range(cols - run + 1):
            best = max(best, float(np.prod([grid[r - k, c + k] for k in range(run)])))
    return best


if __name__ == "__main__":
    data = np.genfromtxt("problem_11_matrix.txt", delimiter=" ")
    print("The greatest product in the same direction is {}.".format(
        int(greatest_adjacent_product(data))))
|
normal
|
{
"blob_id": "bacaaf5c91232d85f451c2c17a42cd2ec6966684",
"index": 1499,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, len(data[0, :]) - 3):\n for j in range(0, len(data[0, :]) - 3):\n product_hor = data[j, i] * data[j, i + 1] * data[j, i + 2] * data[j,\n i + 3]\n if product_hor > max_product_hor:\n max_product_hor = product_hor\n<mask token>\nfor i in range(0, len(data[:, 0]) - 3):\n for j in range(0, len(data[:, 0]) - 3):\n product_ver = data[i, j] * data[i + 1, j] * data[i + 2, j] * data[i +\n 3, j]\n if product_ver > max_product_ver:\n max_product_ver = product_ver\n<mask token>\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(0, len(data[0, :]) - 3):\n product_dia = data[i, j] * data[i + 1, j + 1] * data[i + 2, j + 2\n ] * data[i + 3, j + 3]\n if product_dia > max_product_dia:\n max_product_dia = product_dia\n<mask token>\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(2, len(data[0, :]) - 1):\n product_dia_2 = data[i, j] * data[i - 1, j + 1] * data[i - 2, j + 2\n ] * data[i - 3, j + 3]\n if product_dia_2 > max_product_dia_2:\n max_product_dia_2 = product_dia_2\n<mask token>\nprint('The greatest product in the same direction is {}.'.format(int(\n max_value)))\n",
"step-3": "<mask token>\ndata = np.genfromtxt('problem_11_matrix.txt', delimiter=' ')\nmax_product_hor = 0\nfor i in range(0, len(data[0, :]) - 3):\n for j in range(0, len(data[0, :]) - 3):\n product_hor = data[j, i] * data[j, i + 1] * data[j, i + 2] * data[j,\n i + 3]\n if product_hor > max_product_hor:\n max_product_hor = product_hor\nmax_product_ver = 0\nfor i in range(0, len(data[:, 0]) - 3):\n for j in range(0, len(data[:, 0]) - 3):\n product_ver = data[i, j] * data[i + 1, j] * data[i + 2, j] * data[i +\n 3, j]\n if product_ver > max_product_ver:\n max_product_ver = product_ver\nmax_product_dia = 0\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(0, len(data[0, :]) - 3):\n product_dia = data[i, j] * data[i + 1, j + 1] * data[i + 2, j + 2\n ] * data[i + 3, j + 3]\n if product_dia > max_product_dia:\n max_product_dia = product_dia\nmax_product_dia_2 = 0\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(2, len(data[0, :]) - 1):\n product_dia_2 = data[i, j] * data[i - 1, j + 1] * data[i - 2, j + 2\n ] * data[i - 3, j + 3]\n if product_dia_2 > max_product_dia_2:\n max_product_dia_2 = product_dia_2\nmax_value = max(max_product_hor, max_product_ver, max_product_dia,\n max_product_dia_2)\nprint('The greatest product in the same direction is {}.'.format(int(\n max_value)))\n",
"step-4": "import numpy as np\ndata = np.genfromtxt('problem_11_matrix.txt', delimiter=' ')\nmax_product_hor = 0\nfor i in range(0, len(data[0, :]) - 3):\n for j in range(0, len(data[0, :]) - 3):\n product_hor = data[j, i] * data[j, i + 1] * data[j, i + 2] * data[j,\n i + 3]\n if product_hor > max_product_hor:\n max_product_hor = product_hor\nmax_product_ver = 0\nfor i in range(0, len(data[:, 0]) - 3):\n for j in range(0, len(data[:, 0]) - 3):\n product_ver = data[i, j] * data[i + 1, j] * data[i + 2, j] * data[i +\n 3, j]\n if product_ver > max_product_ver:\n max_product_ver = product_ver\nmax_product_dia = 0\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(0, len(data[0, :]) - 3):\n product_dia = data[i, j] * data[i + 1, j + 1] * data[i + 2, j + 2\n ] * data[i + 3, j + 3]\n if product_dia > max_product_dia:\n max_product_dia = product_dia\nmax_product_dia_2 = 0\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(2, len(data[0, :]) - 1):\n product_dia_2 = data[i, j] * data[i - 1, j + 1] * data[i - 2, j + 2\n ] * data[i - 3, j + 3]\n if product_dia_2 > max_product_dia_2:\n max_product_dia_2 = product_dia_2\nmax_value = max(max_product_hor, max_product_ver, max_product_dia,\n max_product_dia_2)\nprint('The greatest product in the same direction is {}.'.format(int(\n max_value)))\n",
"step-5": "# In the 20×20 grid below, four numbers along a diagonal line have been marked in red.\n# The product of these numbers is 26 × 63 × 78 × 14 = 1788696.\n# What is the greatest product of four adjacent numbers in the same direction\n# (up, down, left, right, or diagonally) in the 20×20 grid?\n\nimport numpy as np\ndata = np.genfromtxt(\"problem_11_matrix.txt\", delimiter=\" \")\n\n# find greatest product horizontally\nmax_product_hor = 0\nfor i in range(0, len(data[0, :])-3):\n for j in range(0, len(data[0, :])-3):\n product_hor = data[j, i] * data[j, i+1] * data[j, i+2] * data[j, i+3]\n if product_hor > max_product_hor:\n max_product_hor = product_hor\n# print(\"The greatest product horizontally is {}. \" .format(max_product_hor))\n\n# find greatest product vertically\nmax_product_ver = 0\nfor i in range(0, len(data[:, 0])-3):\n for j in range(0, len(data[:, 0])-3):\n product_ver = data[i, j] * data[i+1, j] * data[i+2, j] * data[i+3, j]\n if product_ver > max_product_ver:\n max_product_ver = product_ver\n# print(\"The greatest product vertically is {}. \" .format(max_product_ver))\n\n# find greatest product diagonally\nmax_product_dia = 0\nfor j in range(0, len(data[0, :])-3):\n for i in range(0, len(data[0, :])-3):\n product_dia = data[i, j] * data[i+1, j+1] * data[i+2, j+2] * data[i+3, j+3]\n if product_dia > max_product_dia:\n max_product_dia = product_dia\n# print(\"The greatest product diagonally is {}. \" .format(max_product_dia))\n\nmax_product_dia_2 = 0\nfor j in range(0, len(data[0, :])-3):\n for i in range(2, len(data[0, :])-1):\n product_dia_2 = data[i, j] * data[i-1, j+1] * data[i-2, j+2] * data[i-3, j+3]\n if product_dia_2 > max_product_dia_2:\n max_product_dia_2 = product_dia_2\n# print(\"The greatest product diagonally is {}. \" .format(max_product_dia_2))\n\nmax_value = max(max_product_hor, max_product_ver, max_product_dia, max_product_dia_2)\n\nprint(\"The greatest product in the same direction is {}.\" .format(int(max_value)))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@application.route('/results', methods=['GET', 'POST'])
def get_results():
    """Fetch every Student row and render the results listing page."""
    _logger_getting.warning('retrieving all student results')
    records = Student.query.all()
    _logger_getting.warning('the students results have been collected for {}'.format(records))
    return render_template('results.html', data=records)
@application.route('/edit_results/<int:student_id>', methods=['GET', 'POST'])
def edit_student(student_id):
    """Render the edit page for one student, returning 404 for an unknown id.

    Fix: dropped the unused ``form = StudentForm()`` local — the template
    receives only the student record.
    """
    data = Student.query.get_or_404(student_id)
    return render_template('edit_results.html', data=data)
@application.route('/edit_results/<int:student_id>/update_results', methods
    =['GET', 'PUT', 'POST'])
def update_results(student_id):
    """Update one student's marks.

    A valid form submission overwrites the stored record, commits, and
    redirects back to that student's edit page.  A GET request pre-fills
    the form with the current values and renders the update page.
    """
    student_data = Student.query.get_or_404(student_id)
    form = StudentForm()
    if form.validate_on_submit():
        # Copy the submitted fields onto the managed ORM object and persist.
        student_data.name = form.name.data
        student_data.physics = form.physics.data
        student_data.maths = form.maths.data
        student_data.chemistry = form.chemistry.data
        db.session.commit()
        return redirect(url_for('edit_student', student_id=student_data.id))
    elif request.method == 'GET':
        # Seed the form with the existing record so the user edits in place.
        form.name.data = student_data.name
        form.physics.data = student_data.physics
        form.maths.data = student_data.maths
        form.chemistry.data = student_data.chemistry
    # Reached on GET and on a failed (non-GET) validation alike.
    return render_template('update_page.html', form=form)
<|reserved_special_token_0|>
@application.route('/results/<int:indexId>', methods=['DELETE'])
def delete_student(indexId):
    """Delete the student with primary key ``indexId``; report the outcome as JSON."""
    _logger_delete.warning('Inside Delete function')
    record = Student.query.filter_by(id=indexId).first()
    if record is None:
        # Nothing matched the id — answer with the not-found message.
        _logger_delete.warning('No Students in database')
        return jsonify({'message': 'No user found'})
    db.session.delete(record)
    _logger_delete.warning('Deleted Student {} and commit to database'.format(record))
    db.session.commit()
    return jsonify({'message': 'Student found and Deleted'})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@application.route('/', methods=['GET', 'POST'])
def add_results():
    """Show the entry form; on a valid submission persist a new Student and redirect back."""
    form = StudentForm()
    _logger_adding.warning('Inside Add Results function')
    _logger_adding.warning('Student form waiting for Input')
    if not form.validate_on_submit():
        # GET request, or a POST that failed validation: render the form again.
        return render_template('home.html', form=form)
    _logger_adding.warning('When form is submitted with data')
    new_student = Student(name=form.name.data, physics=form.physics.data,
                          maths=form.maths.data, chemistry=form.chemistry.data)
    _logger_adding.warning(
        'Student: {} , physics: {} , maths: {}, chemistry: {}'.format(
            form.name.data, form.physics.data, form.maths.data,
            form.chemistry.data))
    db.session.add(new_student)
    _logger_adding.warning('student results was added to database')
    db.session.commit()
    _logger_adding.warning('database commit')
    return redirect(url_for('add_results'))
@application.route('/results', methods=['GET', 'POST'])
def get_results():
    """Fetch every Student row and render the results listing page."""
    _logger_getting.warning('retrieving all student results')
    data = Student.query.all()
    _logger_getting.warning('the students results have been collected for {}'
        .format(data))
    return render_template('results.html', data=data)
@application.route('/edit_results/<int:student_id>', methods=['GET', 'POST'])
def edit_student(student_id):
    """Render the edit page for one student, returning 404 for an unknown id.

    Fix: dropped the unused ``form = StudentForm()`` local — the template
    receives only the student record.
    """
    data = Student.query.get_or_404(student_id)
    return render_template('edit_results.html', data=data)
@application.route('/edit_results/<int:student_id>/update_results', methods
    =['GET', 'PUT', 'POST'])
def update_results(student_id):
    """Update one student's marks.

    A valid form submission overwrites the stored record, commits, and
    redirects back to that student's edit page.  A GET request pre-fills
    the form with the current values and renders the update page.
    """
    student_data = Student.query.get_or_404(student_id)
    form = StudentForm()
    if form.validate_on_submit():
        # Copy the submitted fields onto the managed ORM object and persist.
        student_data.name = form.name.data
        student_data.physics = form.physics.data
        student_data.maths = form.maths.data
        student_data.chemistry = form.chemistry.data
        db.session.commit()
        return redirect(url_for('edit_student', student_id=student_data.id))
    elif request.method == 'GET':
        # Seed the form with the existing record so the user edits in place.
        form.name.data = student_data.name
        form.physics.data = student_data.physics
        form.maths.data = student_data.maths
        form.chemistry.data = student_data.chemistry
    # Reached on GET and on a failed (non-GET) validation alike.
    return render_template('update_page.html', form=form)
@application.route('/edit_results/<int:student_id>/delete', methods=['GET'])
def delete_post(student_id):
    """Delete a student record, then redirect to the results listing.

    NOTE(review): performing a destructive delete over GET is unsafe —
    link prefetchers or crawlers can trigger it and there is no CSRF
    protection.  Consider switching callers and this route to POST/DELETE.
    """
    if request.method == 'GET':
        student_results = Student.query.get_or_404(student_id)
        db.session.delete(student_results)
        db.session.commit()
    return redirect(url_for('get_results'))
@application.route('/results/<int:indexId>', methods=['DELETE'])
def delete_student(indexId):
    """Delete the student with primary key ``indexId``; report the outcome as JSON."""
    _logger_delete.warning('Inside Delete function')
    student = Student.query.filter_by(id=indexId).first()
    if not student:
        # Nothing matched the id — answer with the not-found message.
        _logger_delete.warning('No Students in database')
        return jsonify({'message': 'No user found'})
    db.session.delete(student)
    _logger_delete.warning('Deleted Student {} and commit to database'.
        format(student))
    db.session.commit()
    return jsonify({'message': 'Student found and Deleted'})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Per-operation loggers so each CRUD route tags its log output distinctly.
_logger_adding = logging.getLogger('Adding results')
_logger_getting = logging.getLogger('Get results')
_logger_update = logging.getLogger('Update results')
_logger_delete = logging.getLogger('Delete results')
@application.route('/', methods=['GET', 'POST'])
def add_results():
    """Show the entry form; on a valid submission persist a new Student and redirect back."""
    form = StudentForm()
    _logger_adding.warning('Inside Add Results function')
    _logger_adding.warning('Student form waiting for Input')
    if form.validate_on_submit():
        _logger_adding.warning('When form is submitted with data')
        # Build the ORM object straight from the validated form fields.
        student = Student(name=form.name.data, physics=form.physics.data,
            maths=form.maths.data, chemistry=form.chemistry.data)
        _logger_adding.warning(
            'Student: {} , physics: {} , maths: {}, chemistry: {}'.format(
            form.name.data, form.physics.data, form.maths.data, form.
            chemistry.data))
        db.session.add(student)
        _logger_adding.warning('student results was added to database')
        db.session.commit()
        _logger_adding.warning('database commit')
        # Redirect-after-POST so a refresh cannot resubmit the form.
        return redirect(url_for('add_results'))
    else:
        # GET request, or a POST that failed validation: render the form again.
        return render_template('home.html', form=form)
@application.route('/results', methods=['GET', 'POST'])
def get_results():
    """List every student's results on the results page."""
    _logger_getting.warning('retrieving all student results')
    data = Student.query.all()
    _logger_getting.warning('the students results have been collected for {}'
        .format(data))
    return render_template('results.html', data=data)
@application.route('/edit_results/<int:student_id>', methods=['GET', 'POST'])
def edit_student(student_id):
form = StudentForm()
data = Student.query.get_or_404(student_id)
return render_template('edit_results.html', data=data)
@application.route('/edit_results/<int:student_id>/update_results', methods
=['GET', 'PUT', 'POST'])
def update_results(student_id):
student_data = Student.query.get_or_404(student_id)
form = StudentForm()
if form.validate_on_submit():
student_data.name = form.name.data
student_data.physics = form.physics.data
student_data.maths = form.maths.data
student_data.chemistry = form.chemistry.data
db.session.commit()
return redirect(url_for('edit_student', student_id=student_data.id))
elif request.method == 'GET':
form.name.data = student_data.name
form.physics.data = student_data.physics
form.maths.data = student_data.maths
form.chemistry.data = student_data.chemistry
return render_template('update_page.html', form=form)
@application.route('/edit_results/<int:student_id>/delete', methods=['GET'])
def delete_post(student_id):
if request.method == 'GET':
student_results = Student.query.get_or_404(student_id)
db.session.delete(student_results)
db.session.commit()
return redirect(url_for('get_results'))
@application.route('/results/<int:indexId>', methods=['DELETE'])
def delete_student(indexId):
_logger_delete.warning('Inside Delete function')
student = Student.query.filter_by(id=indexId).first()
if not student:
_logger_delete.warning('No Students in database')
return jsonify({'message': 'No user found'})
db.session.delete(student)
_logger_delete.warning('Deleted Student {} and commit to database'.
format(student))
db.session.commit()
return jsonify({'message': 'Student found and Deleted'})
<|reserved_special_token_1|>
from flask import Flask, render_template, redirect, url_for, request, jsonify, abort, request
from flask_sqlalchemy import SQLAlchemy
from src.flaskbasic import *
from src.flaskbasic.form import StudentForm
from src.flaskbasic.models import Student
import sys
import logging
_logger_adding = logging.getLogger('Adding results')
_logger_getting = logging.getLogger('Get results')
_logger_update = logging.getLogger('Update results')
_logger_delete = logging.getLogger('Delete results')
@application.route('/', methods=['GET', 'POST'])
def add_results():
form = StudentForm()
_logger_adding.warning('Inside Add Results function')
_logger_adding.warning('Student form waiting for Input')
if form.validate_on_submit():
_logger_adding.warning('When form is submitted with data')
student = Student(name=form.name.data, physics=form.physics.data,
maths=form.maths.data, chemistry=form.chemistry.data)
_logger_adding.warning(
'Student: {} , physics: {} , maths: {}, chemistry: {}'.format(
form.name.data, form.physics.data, form.maths.data, form.
chemistry.data))
db.session.add(student)
_logger_adding.warning('student results was added to database')
db.session.commit()
_logger_adding.warning('database commit')
return redirect(url_for('add_results'))
else:
return render_template('home.html', form=form)
@application.route('/results', methods=['GET', 'POST'])
def get_results():
_logger_getting.warning('retrieving all student results')
data = Student.query.all()
_logger_getting.warning('the students results have been collected for {}'
.format(data))
return render_template('results.html', data=data)
@application.route('/edit_results/<int:student_id>', methods=['GET', 'POST'])
def edit_student(student_id):
form = StudentForm()
data = Student.query.get_or_404(student_id)
return render_template('edit_results.html', data=data)
@application.route('/edit_results/<int:student_id>/update_results', methods
=['GET', 'PUT', 'POST'])
def update_results(student_id):
student_data = Student.query.get_or_404(student_id)
form = StudentForm()
if form.validate_on_submit():
student_data.name = form.name.data
student_data.physics = form.physics.data
student_data.maths = form.maths.data
student_data.chemistry = form.chemistry.data
db.session.commit()
return redirect(url_for('edit_student', student_id=student_data.id))
elif request.method == 'GET':
form.name.data = student_data.name
form.physics.data = student_data.physics
form.maths.data = student_data.maths
form.chemistry.data = student_data.chemistry
return render_template('update_page.html', form=form)
@application.route('/edit_results/<int:student_id>/delete', methods=['GET'])
def delete_post(student_id):
if request.method == 'GET':
student_results = Student.query.get_or_404(student_id)
db.session.delete(student_results)
db.session.commit()
return redirect(url_for('get_results'))
@application.route('/results/<int:indexId>', methods=['DELETE'])
def delete_student(indexId):
_logger_delete.warning('Inside Delete function')
student = Student.query.filter_by(id=indexId).first()
if not student:
_logger_delete.warning('No Students in database')
return jsonify({'message': 'No user found'})
db.session.delete(student)
_logger_delete.warning('Deleted Student {} and commit to database'.
format(student))
db.session.commit()
return jsonify({'message': 'Student found and Deleted'})
<|reserved_special_token_1|>
from flask import Flask,render_template, redirect, url_for,request, jsonify, abort,request
from flask_sqlalchemy import SQLAlchemy
from src.flaskbasic import *
from src.flaskbasic.form import StudentForm
from src.flaskbasic.models import Student
import sys
import logging
# logging.basicConfig(filename='app.log', filemode='w', format='%(asctime)s - %(levelname)s - %(message)s',datefmt='%d-%b-%y %H:%M:%S')
_logger_adding = logging.getLogger('Adding results')
_logger_getting = logging.getLogger('Get results')
_logger_update = logging.getLogger('Update results')
_logger_delete = logging.getLogger('Delete results')
# class Student(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# name = db.Column(db.String(50), nullable= False)
# physics = db.Column(db.Integer)
# maths = db.Column(db.Integer)
# chemistry = db.Column(db.Integer)
@application.route('/', methods=['GET','POST'])
def add_results():
form = StudentForm()
_logger_adding.warning("Inside Add Results function")
_logger_adding.warning("Student form waiting for Input")
if form.validate_on_submit():
_logger_adding.warning("When form is submitted with data")
student = Student(name=form.name.data, physics=form.physics.data, maths=form.maths.data,chemistry=form.chemistry.data,)
_logger_adding.warning("Student: {} , physics: {} , maths: {}, chemistry: {}".format(form.name.data,form.physics.data,form.maths.data,form.chemistry.data))
db.session.add(student)
_logger_adding.warning('student results was added to database')
db.session.commit()
_logger_adding.warning("database commit")
return redirect(url_for("add_results"))
else:
return render_template('home.html', form=form)
@application.route('/results', methods=['GET','POST'])
def get_results():
_logger_getting.warning('retrieving all student results')
data = Student.query.all()
_logger_getting.warning('the students results have been collected for {}'.format(data))
return render_template('results.html', data = data)
@application.route('/edit_results/<int:student_id>', methods=['GET','POST'])
def edit_student(student_id):
form = StudentForm()
data = Student.query.get_or_404(student_id)
return render_template('edit_results.html',data=data)
@application.route('/edit_results/<int:student_id>/update_results',methods=['GET','PUT','POST'])
def update_results(student_id):
student_data = Student.query.get_or_404(student_id)
form = StudentForm()
if form.validate_on_submit():
student_data.name = form.name.data
student_data.physics = form.physics.data
student_data.maths = form.maths.data
student_data.chemistry = form.chemistry.data
db.session.commit()
return redirect(url_for('edit_student', student_id=student_data.id))
elif request.method == 'GET':
form.name.data = student_data.name
form.physics.data = student_data.physics
form.maths.data = student_data.maths
form.chemistry.data = student_data.chemistry
# return render_template('edit_results.html', student_data=student_data)
return render_template('update_page.html',form=form)
@application.route("/edit_results/<int:student_id>/delete", methods=['GET'])
def delete_post(student_id):
if request.method == 'GET':
student_results = Student.query.get_or_404(student_id)
db.session.delete(student_results)
db.session.commit()
return redirect(url_for('get_results'))
# @application.route('/results/<int:indexId>/update_results', methods=['PUT'])
# def update_results(indexId):
# _logger_update.warning("Inside Update function")
# student = Student.query.filter_by(id = indexId).first()
# if not student:
# _logger_update.warning("No Students in database")
# return render_template('home.html',form=form)
# student.name = request.json['name']
# student.physics = request.json.get('physics', "")
# student.maths = request.json.get('maths', "")
# student.chemistry = request.json.get('chemistry', "")
# _logger_update.warning("The updated results are Student Name: {}, Physics: {}, Maths: {}, Chemistry: {}".format(student.name,student.physics,student.maths,student.chemistry))
# db.session.commit()
# return jsonify({'student':'Pass'})
@application.route('/results/<int:indexId>', methods=['DELETE'])
def delete_student(indexId):
_logger_delete.warning("Inside Delete function")
student = Student.query.filter_by(id = indexId).first()
if not student:
_logger_delete.warning("No Students in database")
return jsonify({'message':'No user found'})
db.session.delete(student)
_logger_delete.warning("Deleted Student {} and commit to database".format(student))
db.session.commit()
return jsonify({'message':'Student found and Deleted'})
|
flexible
|
{
"blob_id": "18f9e55b62b30ce8c9d4a57cd9c159543a738770",
"index": 4709,
"step-1": "<mask token>\n\n\n@application.route('/results', methods=['GET', 'POST'])\ndef get_results():\n _logger_getting.warning('retrieving all student results')\n data = Student.query.all()\n _logger_getting.warning('the students results have been collected for {}'\n .format(data))\n return render_template('results.html', data=data)\n\n\n@application.route('/edit_results/<int:student_id>', methods=['GET', 'POST'])\ndef edit_student(student_id):\n form = StudentForm()\n data = Student.query.get_or_404(student_id)\n return render_template('edit_results.html', data=data)\n\n\n@application.route('/edit_results/<int:student_id>/update_results', methods\n =['GET', 'PUT', 'POST'])\ndef update_results(student_id):\n student_data = Student.query.get_or_404(student_id)\n form = StudentForm()\n if form.validate_on_submit():\n student_data.name = form.name.data\n student_data.physics = form.physics.data\n student_data.maths = form.maths.data\n student_data.chemistry = form.chemistry.data\n db.session.commit()\n return redirect(url_for('edit_student', student_id=student_data.id))\n elif request.method == 'GET':\n form.name.data = student_data.name\n form.physics.data = student_data.physics\n form.maths.data = student_data.maths\n form.chemistry.data = student_data.chemistry\n return render_template('update_page.html', form=form)\n\n\n<mask token>\n\n\n@application.route('/results/<int:indexId>', methods=['DELETE'])\ndef delete_student(indexId):\n _logger_delete.warning('Inside Delete function')\n student = Student.query.filter_by(id=indexId).first()\n if not student:\n _logger_delete.warning('No Students in database')\n return jsonify({'message': 'No user found'})\n db.session.delete(student)\n _logger_delete.warning('Deleted Student {} and commit to database'.\n format(student))\n db.session.commit()\n return jsonify({'message': 'Student found and Deleted'})\n",
"step-2": "<mask token>\n\n\n@application.route('/', methods=['GET', 'POST'])\ndef add_results():\n form = StudentForm()\n _logger_adding.warning('Inside Add Results function')\n _logger_adding.warning('Student form waiting for Input')\n if form.validate_on_submit():\n _logger_adding.warning('When form is submitted with data')\n student = Student(name=form.name.data, physics=form.physics.data,\n maths=form.maths.data, chemistry=form.chemistry.data)\n _logger_adding.warning(\n 'Student: {} , physics: {} , maths: {}, chemistry: {}'.format(\n form.name.data, form.physics.data, form.maths.data, form.\n chemistry.data))\n db.session.add(student)\n _logger_adding.warning('student results was added to database')\n db.session.commit()\n _logger_adding.warning('database commit')\n return redirect(url_for('add_results'))\n else:\n return render_template('home.html', form=form)\n\n\n@application.route('/results', methods=['GET', 'POST'])\ndef get_results():\n _logger_getting.warning('retrieving all student results')\n data = Student.query.all()\n _logger_getting.warning('the students results have been collected for {}'\n .format(data))\n return render_template('results.html', data=data)\n\n\n@application.route('/edit_results/<int:student_id>', methods=['GET', 'POST'])\ndef edit_student(student_id):\n form = StudentForm()\n data = Student.query.get_or_404(student_id)\n return render_template('edit_results.html', data=data)\n\n\n@application.route('/edit_results/<int:student_id>/update_results', methods\n =['GET', 'PUT', 'POST'])\ndef update_results(student_id):\n student_data = Student.query.get_or_404(student_id)\n form = StudentForm()\n if form.validate_on_submit():\n student_data.name = form.name.data\n student_data.physics = form.physics.data\n student_data.maths = form.maths.data\n student_data.chemistry = form.chemistry.data\n db.session.commit()\n return redirect(url_for('edit_student', student_id=student_data.id))\n elif request.method == 'GET':\n form.name.data = 
student_data.name\n form.physics.data = student_data.physics\n form.maths.data = student_data.maths\n form.chemistry.data = student_data.chemistry\n return render_template('update_page.html', form=form)\n\n\n@application.route('/edit_results/<int:student_id>/delete', methods=['GET'])\ndef delete_post(student_id):\n if request.method == 'GET':\n student_results = Student.query.get_or_404(student_id)\n db.session.delete(student_results)\n db.session.commit()\n return redirect(url_for('get_results'))\n\n\n@application.route('/results/<int:indexId>', methods=['DELETE'])\ndef delete_student(indexId):\n _logger_delete.warning('Inside Delete function')\n student = Student.query.filter_by(id=indexId).first()\n if not student:\n _logger_delete.warning('No Students in database')\n return jsonify({'message': 'No user found'})\n db.session.delete(student)\n _logger_delete.warning('Deleted Student {} and commit to database'.\n format(student))\n db.session.commit()\n return jsonify({'message': 'Student found and Deleted'})\n",
"step-3": "<mask token>\n_logger_adding = logging.getLogger('Adding results')\n_logger_getting = logging.getLogger('Get results')\n_logger_update = logging.getLogger('Update results')\n_logger_delete = logging.getLogger('Delete results')\n\n\n@application.route('/', methods=['GET', 'POST'])\ndef add_results():\n form = StudentForm()\n _logger_adding.warning('Inside Add Results function')\n _logger_adding.warning('Student form waiting for Input')\n if form.validate_on_submit():\n _logger_adding.warning('When form is submitted with data')\n student = Student(name=form.name.data, physics=form.physics.data,\n maths=form.maths.data, chemistry=form.chemistry.data)\n _logger_adding.warning(\n 'Student: {} , physics: {} , maths: {}, chemistry: {}'.format(\n form.name.data, form.physics.data, form.maths.data, form.\n chemistry.data))\n db.session.add(student)\n _logger_adding.warning('student results was added to database')\n db.session.commit()\n _logger_adding.warning('database commit')\n return redirect(url_for('add_results'))\n else:\n return render_template('home.html', form=form)\n\n\n@application.route('/results', methods=['GET', 'POST'])\ndef get_results():\n _logger_getting.warning('retrieving all student results')\n data = Student.query.all()\n _logger_getting.warning('the students results have been collected for {}'\n .format(data))\n return render_template('results.html', data=data)\n\n\n@application.route('/edit_results/<int:student_id>', methods=['GET', 'POST'])\ndef edit_student(student_id):\n form = StudentForm()\n data = Student.query.get_or_404(student_id)\n return render_template('edit_results.html', data=data)\n\n\n@application.route('/edit_results/<int:student_id>/update_results', methods\n =['GET', 'PUT', 'POST'])\ndef update_results(student_id):\n student_data = Student.query.get_or_404(student_id)\n form = StudentForm()\n if form.validate_on_submit():\n student_data.name = form.name.data\n student_data.physics = form.physics.data\n student_data.maths 
= form.maths.data\n student_data.chemistry = form.chemistry.data\n db.session.commit()\n return redirect(url_for('edit_student', student_id=student_data.id))\n elif request.method == 'GET':\n form.name.data = student_data.name\n form.physics.data = student_data.physics\n form.maths.data = student_data.maths\n form.chemistry.data = student_data.chemistry\n return render_template('update_page.html', form=form)\n\n\n@application.route('/edit_results/<int:student_id>/delete', methods=['GET'])\ndef delete_post(student_id):\n if request.method == 'GET':\n student_results = Student.query.get_or_404(student_id)\n db.session.delete(student_results)\n db.session.commit()\n return redirect(url_for('get_results'))\n\n\n@application.route('/results/<int:indexId>', methods=['DELETE'])\ndef delete_student(indexId):\n _logger_delete.warning('Inside Delete function')\n student = Student.query.filter_by(id=indexId).first()\n if not student:\n _logger_delete.warning('No Students in database')\n return jsonify({'message': 'No user found'})\n db.session.delete(student)\n _logger_delete.warning('Deleted Student {} and commit to database'.\n format(student))\n db.session.commit()\n return jsonify({'message': 'Student found and Deleted'})\n",
"step-4": "from flask import Flask, render_template, redirect, url_for, request, jsonify, abort, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom src.flaskbasic import *\nfrom src.flaskbasic.form import StudentForm\nfrom src.flaskbasic.models import Student\nimport sys\nimport logging\n_logger_adding = logging.getLogger('Adding results')\n_logger_getting = logging.getLogger('Get results')\n_logger_update = logging.getLogger('Update results')\n_logger_delete = logging.getLogger('Delete results')\n\n\n@application.route('/', methods=['GET', 'POST'])\ndef add_results():\n form = StudentForm()\n _logger_adding.warning('Inside Add Results function')\n _logger_adding.warning('Student form waiting for Input')\n if form.validate_on_submit():\n _logger_adding.warning('When form is submitted with data')\n student = Student(name=form.name.data, physics=form.physics.data,\n maths=form.maths.data, chemistry=form.chemistry.data)\n _logger_adding.warning(\n 'Student: {} , physics: {} , maths: {}, chemistry: {}'.format(\n form.name.data, form.physics.data, form.maths.data, form.\n chemistry.data))\n db.session.add(student)\n _logger_adding.warning('student results was added to database')\n db.session.commit()\n _logger_adding.warning('database commit')\n return redirect(url_for('add_results'))\n else:\n return render_template('home.html', form=form)\n\n\n@application.route('/results', methods=['GET', 'POST'])\ndef get_results():\n _logger_getting.warning('retrieving all student results')\n data = Student.query.all()\n _logger_getting.warning('the students results have been collected for {}'\n .format(data))\n return render_template('results.html', data=data)\n\n\n@application.route('/edit_results/<int:student_id>', methods=['GET', 'POST'])\ndef edit_student(student_id):\n form = StudentForm()\n data = Student.query.get_or_404(student_id)\n return render_template('edit_results.html', data=data)\n\n\n@application.route('/edit_results/<int:student_id>/update_results', methods\n 
=['GET', 'PUT', 'POST'])\ndef update_results(student_id):\n student_data = Student.query.get_or_404(student_id)\n form = StudentForm()\n if form.validate_on_submit():\n student_data.name = form.name.data\n student_data.physics = form.physics.data\n student_data.maths = form.maths.data\n student_data.chemistry = form.chemistry.data\n db.session.commit()\n return redirect(url_for('edit_student', student_id=student_data.id))\n elif request.method == 'GET':\n form.name.data = student_data.name\n form.physics.data = student_data.physics\n form.maths.data = student_data.maths\n form.chemistry.data = student_data.chemistry\n return render_template('update_page.html', form=form)\n\n\n@application.route('/edit_results/<int:student_id>/delete', methods=['GET'])\ndef delete_post(student_id):\n if request.method == 'GET':\n student_results = Student.query.get_or_404(student_id)\n db.session.delete(student_results)\n db.session.commit()\n return redirect(url_for('get_results'))\n\n\n@application.route('/results/<int:indexId>', methods=['DELETE'])\ndef delete_student(indexId):\n _logger_delete.warning('Inside Delete function')\n student = Student.query.filter_by(id=indexId).first()\n if not student:\n _logger_delete.warning('No Students in database')\n return jsonify({'message': 'No user found'})\n db.session.delete(student)\n _logger_delete.warning('Deleted Student {} and commit to database'.\n format(student))\n db.session.commit()\n return jsonify({'message': 'Student found and Deleted'})\n",
"step-5": "from flask import Flask,render_template, redirect, url_for,request, jsonify, abort,request\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom src.flaskbasic import *\r\nfrom src.flaskbasic.form import StudentForm\r\nfrom src.flaskbasic.models import Student\r\nimport sys\r\nimport logging\r\n\r\n# logging.basicConfig(filename='app.log', filemode='w', format='%(asctime)s - %(levelname)s - %(message)s',datefmt='%d-%b-%y %H:%M:%S')\r\n_logger_adding = logging.getLogger('Adding results')\r\n_logger_getting = logging.getLogger('Get results')\r\n_logger_update = logging.getLogger('Update results')\r\n_logger_delete = logging.getLogger('Delete results')\r\n\r\n# class Student(db.Model):\r\n# id = db.Column(db.Integer, primary_key=True)\r\n# name = db.Column(db.String(50), nullable= False)\r\n# physics = db.Column(db.Integer)\r\n# maths = db.Column(db.Integer)\r\n# chemistry = db.Column(db.Integer)\r\n\r\n@application.route('/', methods=['GET','POST'])\r\ndef add_results():\r\n form = StudentForm()\r\n _logger_adding.warning(\"Inside Add Results function\")\r\n _logger_adding.warning(\"Student form waiting for Input\")\r\n if form.validate_on_submit():\r\n _logger_adding.warning(\"When form is submitted with data\")\r\n student = Student(name=form.name.data, physics=form.physics.data, maths=form.maths.data,chemistry=form.chemistry.data,)\r\n _logger_adding.warning(\"Student: {} , physics: {} , maths: {}, chemistry: {}\".format(form.name.data,form.physics.data,form.maths.data,form.chemistry.data))\r\n db.session.add(student)\r\n _logger_adding.warning('student results was added to database')\r\n db.session.commit()\r\n _logger_adding.warning(\"database commit\")\r\n return redirect(url_for(\"add_results\"))\r\n else:\r\n return render_template('home.html', form=form)\r\n\r\n@application.route('/results', methods=['GET','POST'])\r\ndef get_results():\r\n _logger_getting.warning('retrieving all student results')\r\n data = Student.query.all()\r\n 
_logger_getting.warning('the students results have been collected for {}'.format(data))\r\n return render_template('results.html', data = data)\r\n\r\n@application.route('/edit_results/<int:student_id>', methods=['GET','POST'])\r\ndef edit_student(student_id):\r\n form = StudentForm()\r\n data = Student.query.get_or_404(student_id)\r\n return render_template('edit_results.html',data=data)\r\n\r\n@application.route('/edit_results/<int:student_id>/update_results',methods=['GET','PUT','POST'])\r\ndef update_results(student_id):\r\n student_data = Student.query.get_or_404(student_id)\r\n form = StudentForm()\r\n if form.validate_on_submit():\r\n student_data.name = form.name.data\r\n student_data.physics = form.physics.data\r\n student_data.maths = form.maths.data\r\n student_data.chemistry = form.chemistry.data\r\n db.session.commit()\r\n return redirect(url_for('edit_student', student_id=student_data.id))\r\n elif request.method == 'GET':\r\n form.name.data = student_data.name\r\n form.physics.data = student_data.physics\r\n form.maths.data = student_data.maths\r\n form.chemistry.data = student_data.chemistry\r\n # return render_template('edit_results.html', student_data=student_data)\r\n return render_template('update_page.html',form=form)\r\n\r\n@application.route(\"/edit_results/<int:student_id>/delete\", methods=['GET'])\r\ndef delete_post(student_id):\r\n if request.method == 'GET':\r\n student_results = Student.query.get_or_404(student_id)\r\n db.session.delete(student_results)\r\n db.session.commit()\r\n return redirect(url_for('get_results'))\r\n\r\n# @application.route('/results/<int:indexId>/update_results', methods=['PUT'])\r\n# def update_results(indexId):\r\n# _logger_update.warning(\"Inside Update function\")\r\n# student = Student.query.filter_by(id = indexId).first()\r\n\r\n# if not student:\r\n# _logger_update.warning(\"No Students in database\")\r\n# return render_template('home.html',form=form)\r\n\r\n# student.name = request.json['name']\r\n# 
student.physics = request.json.get('physics', \"\")\r\n# student.maths = request.json.get('maths', \"\")\r\n# student.chemistry = request.json.get('chemistry', \"\")\r\n# _logger_update.warning(\"The updated results are Student Name: {}, Physics: {}, Maths: {}, Chemistry: {}\".format(student.name,student.physics,student.maths,student.chemistry)) \r\n# db.session.commit()\r\n \r\n# return jsonify({'student':'Pass'})\r\n\r\n@application.route('/results/<int:indexId>', methods=['DELETE'])\r\ndef delete_student(indexId):\r\n _logger_delete.warning(\"Inside Delete function\")\r\n student = Student.query.filter_by(id = indexId).first()\r\n\r\n if not student:\r\n _logger_delete.warning(\"No Students in database\")\r\n return jsonify({'message':'No user found'})\r\n\r\n db.session.delete(student)\r\n _logger_delete.warning(\"Deleted Student {} and commit to database\".format(student))\r\n db.session.commit()\r\n\r\n return jsonify({'message':'Student found and Deleted'})\r\n\r\n\r\n\r\n\r\n\r\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
from Song import Song
class FroggyWoogie(Song):
    """Song definition for track 5, "Froggy Woogie" by Sleepy Koala."""

    def __init__(self):
        super(FroggyWoogie, self).__init__()
        # Path to the audio file, relative to the program's working directory.
        self.file = 'Music/5-Sleepy_Koala_-_Froggy_Woogie.mp3'
        # Timed plan entries of the form [start_seconds, count, mode, length].
        # NOTE(review): the meaning of the 2nd ('count') and 3rd ('W'/'S')
        # columns is defined by the Song base class, which is not visible
        # here — confirm against Song before relying on this description.
        self.plan = [[0.0, 32, 'W', 16.271], [16.271, 16, 'S', 8.135],
            [24.406, 44, 'S', 22.373], [46.779, 16, 'S', 8.136],
            [54.915, 18, 'S', 1.017], [55.932, 36, 'S', 18.305],
            [74.237, 14, 'S', 7.118], [81.355, 32, 'W', 16.293],
            [97.648, 32, 'S', 16.25], [113.898, 32, 'S', 16.271],
            [130.169, 32, 'S', 16.271], [146.44, 64, 'S', 32.532],
            [178.972, 32, 'S', 16.282], [195.254, 32, 'S', 16.271],
            [211.525, 32, 'W', 16.271], [227.796, 32, 'W', 16.271],
            [244.067, 32, 'W', 16.271], [260.338, 32, 'W', 16.272],
            [276.61, 32, 'W', 16.271], [292.881, 32, 'S', 16.271],
            [309.152, 32, 'S', 16.271], [325.423, 36, 'S', 18.305],
            [343.728, 32, 'W', 34.577]]
|
normal
|
{
"blob_id": "1df1081308ead28c023774a8671df8a0671a1bba",
"index": 4177,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass FroggyWoogie(Song):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass FroggyWoogie(Song):\n\n def __init__(self):\n super(FroggyWoogie, self).__init__()\n self.file = 'Music/5-Sleepy_Koala_-_Froggy_Woogie.mp3'\n self.plan = [[0.0, 32, 'W', 16.271], [16.271, 16, 'S', 8.135], [\n 24.406, 44, 'S', 22.373], [46.779, 16, 'S', 8.136], [54.915, 18,\n 'S', 1.017], [55.932, 36, 'S', 18.305], [74.237, 14, 'S', 7.118\n ], [81.355, 32, 'W', 16.293], [97.648, 32, 'S', 16.25], [\n 113.898, 32, 'S', 16.271], [130.169, 32, 'S', 16.271], [146.44,\n 64, 'S', 32.532], [178.972, 32, 'S', 16.282], [195.254, 32, 'S',\n 16.271], [211.525, 32, 'W', 16.271], [227.796, 32, 'W', 16.271],\n [244.067, 32, 'W', 16.271], [260.338, 32, 'W', 16.272], [276.61,\n 32, 'W', 16.271], [292.881, 32, 'S', 16.271], [309.152, 32, 'S',\n 16.271], [325.423, 36, 'S', 18.305], [343.728, 32, 'W', 34.577]]\n",
"step-4": "from Song import Song\n\n\nclass FroggyWoogie(Song):\n\n def __init__(self):\n super(FroggyWoogie, self).__init__()\n self.file = 'Music/5-Sleepy_Koala_-_Froggy_Woogie.mp3'\n self.plan = [[0.0, 32, 'W', 16.271], [16.271, 16, 'S', 8.135], [\n 24.406, 44, 'S', 22.373], [46.779, 16, 'S', 8.136], [54.915, 18,\n 'S', 1.017], [55.932, 36, 'S', 18.305], [74.237, 14, 'S', 7.118\n ], [81.355, 32, 'W', 16.293], [97.648, 32, 'S', 16.25], [\n 113.898, 32, 'S', 16.271], [130.169, 32, 'S', 16.271], [146.44,\n 64, 'S', 32.532], [178.972, 32, 'S', 16.282], [195.254, 32, 'S',\n 16.271], [211.525, 32, 'W', 16.271], [227.796, 32, 'W', 16.271],\n [244.067, 32, 'W', 16.271], [260.338, 32, 'W', 16.272], [276.61,\n 32, 'W', 16.271], [292.881, 32, 'S', 16.271], [309.152, 32, 'S',\n 16.271], [325.423, 36, 'S', 18.305], [343.728, 32, 'W', 34.577]]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@dataclass
class Node:
    """Tree node used by the UPGMA / progressive-alignment code.

    NOTE(review): the explicit ``__init__`` below replaces the one
    ``@dataclass`` would generate, so the declared fields (age, num,
    label, alignment) are *not* set at construction time — callers
    assign them after creating the node (e.g. upgma sets ``age`` and
    clustalw sets ``alignment``).  Confirm this is intentional.
    """
    age: int        # merge height (set by upgma: half the merge distance)
    num: int
    label: str
    alignment: list  # list of aligned strings (was annotated as `[]`)

    def __init__(self, child1=None, child2=None):
        # Leaves are created with both children left as None.
        self.child1 = child1
        self.child2 = child2
<|reserved_special_token_0|>
def initializeClusters(t):
    """Return a new list holding the leaf nodes of the flat tree list *t*.

    For a binary tree stored as a flat node list, the first
    (len(t) + 1) / 2 entries are the leaves; each starts as its own cluster.
    """
    leaf_count = int((len(t) + 1) / 2)
    return t[:leaf_count]
<|reserved_special_token_0|>
def upgma(mtx, speciesNames):
    """Build a UPGMA guide tree from distance matrix *mtx*.

    Repeatedly merges the two closest clusters until one root remains,
    setting each internal node's age to half the merge distance
    (UPGMA assumes an ultrametric tree).  Relies on helpers defined
    elsewhere in this module: initializeTree, findMinElement,
    addRowCol, delRowCol, delClusters.
    Returns the full node list; the root is the last element.
    """
    tree = initializeTree(speciesNames)
    clusters = initializeClusters(tree)
    numLeaves = len(mtx)
    # Internal nodes occupy indices numLeaves .. 2*numLeaves - 2.
    for i in range(numLeaves, 2 * numLeaves - 1):
        minElements = findMinElement(mtx)
        row = minElements[0]
        col = minElements[1]
        min = minElements[2]  # smallest pairwise distance currently in mtx
        tree[i].age = min / 2
        tree[i].child1 = clusters[row]
        tree[i].child2 = clusters[col]
        # Order matters: append the merged row/column before deleting
        # the two old ones, so row/col indices stay valid.
        mtx = addRowCol(mtx, clusters, row, col)
        clusters.append(tree[i])
        mtx = delRowCol(mtx, row, col)
        clusters = delClusters(clusters, row, col)
    return tree
def sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):
    """Sum-of-pairs score between column *idx1* of *align1* and *idx2* of *align2*.

    Every character pair across the two columns contributes:
      * nothing when both are gaps ('-'),
      * +match when the characters are equal,
      * -mismatch when both are residues but differ,
      * -gap when exactly one is a gap.
    Returns the total as a float.
    """
    column_a = [row[idx1] for row in align1]
    column_b = [row[idx2] for row in align2]
    total = 0.0
    for a in column_a:
        for b in column_b:
            if a == '-' and b == '-':
                continue  # gap against gap is neutral
            if a == b:
                total += match
            elif a != '-' and b != '-':
                total -= mismatch
            else:
                total -= gap
    return total
<|reserved_special_token_0|>
def progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,
    supergap):
    """Rebuild the move table ('UP'/'LEFT'/'DIAG') for a filled score table.

    The border always points back along its own axis; each interior cell
    is classified by checking which neighbour, minus the *supergap*
    penalty, produced the stored score, defaulting to a diagonal move.
    (match/mismatch/gap are accepted for signature symmetry with the
    table-filling step but are not consulted here.)
    """
    rows = len(align1[0]) + 1
    cols = len(align2[0]) + 1
    moves = [['' for _ in range(cols)] for _ in range(rows)]
    for c in range(1, cols):
        moves[0][c] = 'LEFT'
    for r in range(1, rows):
        moves[r][0] = 'UP'
    for r in range(1, rows):
        for c in range(1, cols):
            here = scoreTable[r][c]
            if here == scoreTable[r - 1][c] - supergap:
                moves[r][c] = 'UP'
            elif here == scoreTable[r][c - 1] - supergap:
                moves[r][c] = 'LEFT'
            else:
                moves[r][c] = 'DIAG'
    return moves
def backtracker(string, backtrack, orientation):
aligned = ''
row = len(backtrack) - 1
col = len(backtrack[0]) - 1
while row != 0 or col != 0:
k = len(string)
if backtrack[row][col] == 'UP':
if orientation == 'top':
aligned = '-' + aligned
elif orientation == 'side':
aligned = str(string[k - 1]) + aligned
string = string[:k - 1]
row -= 1
elif backtrack[row][col] == 'LEFT':
if orientation == 'side':
aligned = '-' + aligned
elif orientation == 'top':
aligned = str(string[k - 1]) + aligned
string = string[:k - 1]
col -= 1
else:
aligned = str(string[k - 1]) + aligned
string = string[:k - 1]
row -= 1
col -= 1
return aligned
def outputProgressiveAlign(align1, align2, backtrack):
a = [[''] for i in range(len(align1) + len(align2))]
for i in range(len(align1)):
a[i] = backtracker(align1[i], backtrack, 'side')
for j in range(len(align1), len(align2) + len(align1)):
a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')
return a
<|reserved_special_token_0|>
def clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):
for i in range(len(dnaStrings)):
guideTree[i].alignment = [dnaStrings[i]]
for j in range(len(dnaStrings), len(guideTree)):
child1 = guideTree[j].child1
child2 = guideTree[j].child2
guideTree[j].alignment = progressiveAlign(child1.alignment, child2.
alignment, match, mismatch, gap, supergap)
return guideTree[len(guideTree) - 1].alignment
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@dataclass
class Node:
age: int
num: int
label: str
alignment: []
def __init__(self, child1=None, child2=None):
self.child1 = child1
self.child2 = child2
<|reserved_special_token_0|>
def initializeClusters(t):
numNodes = len(t)
numLeaves = (numNodes + 1) / 2
clusters = [0] * int(numLeaves)
for i in range(int(numLeaves)):
clusters[i] = t[i]
return clusters
<|reserved_special_token_0|>
def countLeaves(v: Node):
if v.child1 is None or v.child2 is None:
return 1
return countLeaves(v.child1) + countLeaves(v.child2)
<|reserved_special_token_0|>
def findMinElement(mtx):
minRow = 0
minCol = 1
minElement = mtx[0][1]
for row in range(0, len(mtx)):
for col in range(row + 1, len(mtx)):
if mtx[row][col] < minElement:
minRow = row
minCol = col
minElement = mtx[row][col]
return minRow, minCol, minElement
<|reserved_special_token_0|>
def upgma(mtx, speciesNames):
tree = initializeTree(speciesNames)
clusters = initializeClusters(tree)
numLeaves = len(mtx)
for i in range(numLeaves, 2 * numLeaves - 1):
minElements = findMinElement(mtx)
row = minElements[0]
col = minElements[1]
min = minElements[2]
tree[i].age = min / 2
tree[i].child1 = clusters[row]
tree[i].child2 = clusters[col]
mtx = addRowCol(mtx, clusters, row, col)
clusters.append(tree[i])
mtx = delRowCol(mtx, row, col)
clusters = delClusters(clusters, row, col)
return tree
def sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):
alignment1 = [''] * len(align1)
for i in range(len(align1)):
alignment1[i] = align1[i][idx1]
alignment2 = [''] * len(align2)
for i in range(len(align2)):
alignment2[i] = align2[i][idx2]
score = 0.0
for char in alignment1:
for char2 in alignment2:
if char == '-' and char2 == '-':
continue
elif char == char2:
score += match
elif char != '-' and char2 != '-':
score -= mismatch
else:
score -= gap
return score
def generateScoreTable(align1, align2, match, mismatch, gap, supergap):
scoreTable = [[(0) for j in range(len(align2[0]) + 1)] for i in range(
len(align1[0]) + 1)]
for i in range(len(scoreTable)):
scoreTable[i][0] = i * -supergap
for i in range(len(scoreTable[0])):
scoreTable[0][i] = i * -supergap
for i in range(1, len(align1[0]) + 1):
for j in range(1, len(align2[0]) + 1):
up = scoreTable[i - 1][j] - supergap
left = scoreTable[i][j - 1] - supergap
diag = scoreTable[i - 1][j - 1] + sumPairScores(align1, align2,
i - 1, j - 1, match, mismatch, gap)
scoreTable[i][j] = max(up, left, diag)
return scoreTable
def progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,
supergap):
numRows = len(align1[0]) + 1
numCols = len(align2[0]) + 1
backtrack = [['' for i in range(numCols)] for j in range(numRows)]
for i in range(1, numCols):
backtrack[0][i] = 'LEFT'
for i in range(1, numRows):
backtrack[i][0] = 'UP'
for i in range(1, numRows):
for j in range(1, numCols):
if scoreTable[i][j] == scoreTable[i - 1][j] - supergap:
backtrack[i][j] = 'UP'
elif scoreTable[i][j] == scoreTable[i][j - 1] - supergap:
backtrack[i][j] = 'LEFT'
else:
backtrack[i][j] = 'DIAG'
return backtrack
def backtracker(string, backtrack, orientation):
aligned = ''
row = len(backtrack) - 1
col = len(backtrack[0]) - 1
while row != 0 or col != 0:
k = len(string)
if backtrack[row][col] == 'UP':
if orientation == 'top':
aligned = '-' + aligned
elif orientation == 'side':
aligned = str(string[k - 1]) + aligned
string = string[:k - 1]
row -= 1
elif backtrack[row][col] == 'LEFT':
if orientation == 'side':
aligned = '-' + aligned
elif orientation == 'top':
aligned = str(string[k - 1]) + aligned
string = string[:k - 1]
col -= 1
else:
aligned = str(string[k - 1]) + aligned
string = string[:k - 1]
row -= 1
col -= 1
return aligned
def outputProgressiveAlign(align1, align2, backtrack):
a = [[''] for i in range(len(align1) + len(align2))]
for i in range(len(align1)):
a[i] = backtracker(align1[i], backtrack, 'side')
for j in range(len(align1), len(align2) + len(align1)):
a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')
return a
<|reserved_special_token_0|>
def clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):
for i in range(len(dnaStrings)):
guideTree[i].alignment = [dnaStrings[i]]
for j in range(len(dnaStrings), len(guideTree)):
child1 = guideTree[j].child1
child2 = guideTree[j].child2
guideTree[j].alignment = progressiveAlign(child1.alignment, child2.
alignment, match, mismatch, gap, supergap)
return guideTree[len(guideTree) - 1].alignment
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@dataclass
class Node:
age: int
num: int
label: str
alignment: []
def __init__(self, child1=None, child2=None):
self.child1 = child1
self.child2 = child2
def initializeMatrix(m, n):
mtx = [[(0) for x in range(n)] for y in range(m)]
return mtx
def initializeClusters(t):
numNodes = len(t)
numLeaves = (numNodes + 1) / 2
clusters = [0] * int(numLeaves)
for i in range(int(numLeaves)):
clusters[i] = t[i]
return clusters
def initializeTree(speciesNames):
numLeaves = len(speciesNames)
t = [Node] * (2 * numLeaves - 1)
for i in range(len(t)):
vx = Node()
if i < numLeaves:
vx.label = speciesNames[i]
else:
vx.label = 'Ancestor species' + str(i)
vx.num = i
t[i] = vx
return t
def countLeaves(v: Node):
if v.child1 is None or v.child2 is None:
return 1
return countLeaves(v.child1) + countLeaves(v.child2)
<|reserved_special_token_0|>
def findMinElement(mtx):
minRow = 0
minCol = 1
minElement = mtx[0][1]
for row in range(0, len(mtx)):
for col in range(row + 1, len(mtx)):
if mtx[row][col] < minElement:
minRow = row
minCol = col
minElement = mtx[row][col]
return minRow, minCol, minElement
<|reserved_special_token_0|>
def addRowCol(mtx, clusters, row, col):
newRow = [0] * (len(mtx) + 1)
for i in range(len(newRow) - 1):
if i != row and i != col:
size1 = countLeaves(clusters[row])
size2 = countLeaves(clusters[col])
avg = (size1 * mtx[row][i] + size2 * mtx[i][col]) / (size1 + size2)
newRow[i] = avg
mtx.append(newRow)
for i in range(len(newRow) - 1):
mtx[i].append(newRow[i])
return mtx
def upgma(mtx, speciesNames):
tree = initializeTree(speciesNames)
clusters = initializeClusters(tree)
numLeaves = len(mtx)
for i in range(numLeaves, 2 * numLeaves - 1):
minElements = findMinElement(mtx)
row = minElements[0]
col = minElements[1]
min = minElements[2]
tree[i].age = min / 2
tree[i].child1 = clusters[row]
tree[i].child2 = clusters[col]
mtx = addRowCol(mtx, clusters, row, col)
clusters.append(tree[i])
mtx = delRowCol(mtx, row, col)
clusters = delClusters(clusters, row, col)
return tree
def sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):
alignment1 = [''] * len(align1)
for i in range(len(align1)):
alignment1[i] = align1[i][idx1]
alignment2 = [''] * len(align2)
for i in range(len(align2)):
alignment2[i] = align2[i][idx2]
score = 0.0
for char in alignment1:
for char2 in alignment2:
if char == '-' and char2 == '-':
continue
elif char == char2:
score += match
elif char != '-' and char2 != '-':
score -= mismatch
else:
score -= gap
return score
def generateScoreTable(align1, align2, match, mismatch, gap, supergap):
scoreTable = [[(0) for j in range(len(align2[0]) + 1)] for i in range(
len(align1[0]) + 1)]
for i in range(len(scoreTable)):
scoreTable[i][0] = i * -supergap
for i in range(len(scoreTable[0])):
scoreTable[0][i] = i * -supergap
for i in range(1, len(align1[0]) + 1):
for j in range(1, len(align2[0]) + 1):
up = scoreTable[i - 1][j] - supergap
left = scoreTable[i][j - 1] - supergap
diag = scoreTable[i - 1][j - 1] + sumPairScores(align1, align2,
i - 1, j - 1, match, mismatch, gap)
scoreTable[i][j] = max(up, left, diag)
return scoreTable
def progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,
supergap):
numRows = len(align1[0]) + 1
numCols = len(align2[0]) + 1
backtrack = [['' for i in range(numCols)] for j in range(numRows)]
for i in range(1, numCols):
backtrack[0][i] = 'LEFT'
for i in range(1, numRows):
backtrack[i][0] = 'UP'
for i in range(1, numRows):
for j in range(1, numCols):
if scoreTable[i][j] == scoreTable[i - 1][j] - supergap:
backtrack[i][j] = 'UP'
elif scoreTable[i][j] == scoreTable[i][j - 1] - supergap:
backtrack[i][j] = 'LEFT'
else:
backtrack[i][j] = 'DIAG'
return backtrack
def backtracker(string, backtrack, orientation):
aligned = ''
row = len(backtrack) - 1
col = len(backtrack[0]) - 1
while row != 0 or col != 0:
k = len(string)
if backtrack[row][col] == 'UP':
if orientation == 'top':
aligned = '-' + aligned
elif orientation == 'side':
aligned = str(string[k - 1]) + aligned
string = string[:k - 1]
row -= 1
elif backtrack[row][col] == 'LEFT':
if orientation == 'side':
aligned = '-' + aligned
elif orientation == 'top':
aligned = str(string[k - 1]) + aligned
string = string[:k - 1]
col -= 1
else:
aligned = str(string[k - 1]) + aligned
string = string[:k - 1]
row -= 1
col -= 1
return aligned
def outputProgressiveAlign(align1, align2, backtrack):
a = [[''] for i in range(len(align1) + len(align2))]
for i in range(len(align1)):
a[i] = backtracker(align1[i], backtrack, 'side')
for j in range(len(align1), len(align2) + len(align1)):
a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')
return a
def progressiveAlign(align1, align2, match, mismatch, gap, supergap):
scoreTable = generateScoreTable(align1, align2, match, mismatch, gap,
supergap)
backtrack = progressiveBacktrack(scoreTable, align1, align2, match,
mismatch, gap, supergap)
opt = outputProgressiveAlign(align1, align2, backtrack)
return opt
def clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):
for i in range(len(dnaStrings)):
guideTree[i].alignment = [dnaStrings[i]]
for j in range(len(dnaStrings), len(guideTree)):
child1 = guideTree[j].child1
child2 = guideTree[j].child2
guideTree[j].alignment = progressiveAlign(child1.alignment, child2.
alignment, match, mismatch, gap, supergap)
return guideTree[len(guideTree) - 1].alignment
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@dataclass
class Node:
age: int
num: int
label: str
alignment: []
def __init__(self, child1=None, child2=None):
self.child1 = child1
self.child2 = child2
def initializeMatrix(m, n):
mtx = [[(0) for x in range(n)] for y in range(m)]
return mtx
def initializeClusters(t):
numNodes = len(t)
numLeaves = (numNodes + 1) / 2
clusters = [0] * int(numLeaves)
for i in range(int(numLeaves)):
clusters[i] = t[i]
return clusters
def initializeTree(speciesNames):
numLeaves = len(speciesNames)
t = [Node] * (2 * numLeaves - 1)
for i in range(len(t)):
vx = Node()
if i < numLeaves:
vx.label = speciesNames[i]
else:
vx.label = 'Ancestor species' + str(i)
vx.num = i
t[i] = vx
return t
def countLeaves(v: Node):
if v.child1 is None or v.child2 is None:
return 1
return countLeaves(v.child1) + countLeaves(v.child2)
def delClusters(clusters, row, col):
del clusters[col]
del clusters[row]
return clusters
def findMinElement(mtx):
minRow = 0
minCol = 1
minElement = mtx[0][1]
for row in range(0, len(mtx)):
for col in range(row + 1, len(mtx)):
if mtx[row][col] < minElement:
minRow = row
minCol = col
minElement = mtx[row][col]
return minRow, minCol, minElement
<|reserved_special_token_0|>
def addRowCol(mtx, clusters, row, col):
newRow = [0] * (len(mtx) + 1)
for i in range(len(newRow) - 1):
if i != row and i != col:
size1 = countLeaves(clusters[row])
size2 = countLeaves(clusters[col])
avg = (size1 * mtx[row][i] + size2 * mtx[i][col]) / (size1 + size2)
newRow[i] = avg
mtx.append(newRow)
for i in range(len(newRow) - 1):
mtx[i].append(newRow[i])
return mtx
def upgma(mtx, speciesNames):
tree = initializeTree(speciesNames)
clusters = initializeClusters(tree)
numLeaves = len(mtx)
for i in range(numLeaves, 2 * numLeaves - 1):
minElements = findMinElement(mtx)
row = minElements[0]
col = minElements[1]
min = minElements[2]
tree[i].age = min / 2
tree[i].child1 = clusters[row]
tree[i].child2 = clusters[col]
mtx = addRowCol(mtx, clusters, row, col)
clusters.append(tree[i])
mtx = delRowCol(mtx, row, col)
clusters = delClusters(clusters, row, col)
return tree
def sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):
alignment1 = [''] * len(align1)
for i in range(len(align1)):
alignment1[i] = align1[i][idx1]
alignment2 = [''] * len(align2)
for i in range(len(align2)):
alignment2[i] = align2[i][idx2]
score = 0.0
for char in alignment1:
for char2 in alignment2:
if char == '-' and char2 == '-':
continue
elif char == char2:
score += match
elif char != '-' and char2 != '-':
score -= mismatch
else:
score -= gap
return score
def generateScoreTable(align1, align2, match, mismatch, gap, supergap):
scoreTable = [[(0) for j in range(len(align2[0]) + 1)] for i in range(
len(align1[0]) + 1)]
for i in range(len(scoreTable)):
scoreTable[i][0] = i * -supergap
for i in range(len(scoreTable[0])):
scoreTable[0][i] = i * -supergap
for i in range(1, len(align1[0]) + 1):
for j in range(1, len(align2[0]) + 1):
up = scoreTable[i - 1][j] - supergap
left = scoreTable[i][j - 1] - supergap
diag = scoreTable[i - 1][j - 1] + sumPairScores(align1, align2,
i - 1, j - 1, match, mismatch, gap)
scoreTable[i][j] = max(up, left, diag)
return scoreTable
def progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,
supergap):
numRows = len(align1[0]) + 1
numCols = len(align2[0]) + 1
backtrack = [['' for i in range(numCols)] for j in range(numRows)]
for i in range(1, numCols):
backtrack[0][i] = 'LEFT'
for i in range(1, numRows):
backtrack[i][0] = 'UP'
for i in range(1, numRows):
for j in range(1, numCols):
if scoreTable[i][j] == scoreTable[i - 1][j] - supergap:
backtrack[i][j] = 'UP'
elif scoreTable[i][j] == scoreTable[i][j - 1] - supergap:
backtrack[i][j] = 'LEFT'
else:
backtrack[i][j] = 'DIAG'
return backtrack
def backtracker(string, backtrack, orientation):
aligned = ''
row = len(backtrack) - 1
col = len(backtrack[0]) - 1
while row != 0 or col != 0:
k = len(string)
if backtrack[row][col] == 'UP':
if orientation == 'top':
aligned = '-' + aligned
elif orientation == 'side':
aligned = str(string[k - 1]) + aligned
string = string[:k - 1]
row -= 1
elif backtrack[row][col] == 'LEFT':
if orientation == 'side':
aligned = '-' + aligned
elif orientation == 'top':
aligned = str(string[k - 1]) + aligned
string = string[:k - 1]
col -= 1
else:
aligned = str(string[k - 1]) + aligned
string = string[:k - 1]
row -= 1
col -= 1
return aligned
def outputProgressiveAlign(align1, align2, backtrack):
a = [[''] for i in range(len(align1) + len(align2))]
for i in range(len(align1)):
a[i] = backtracker(align1[i], backtrack, 'side')
for j in range(len(align1), len(align2) + len(align1)):
a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')
return a
def progressiveAlign(align1, align2, match, mismatch, gap, supergap):
scoreTable = generateScoreTable(align1, align2, match, mismatch, gap,
supergap)
backtrack = progressiveBacktrack(scoreTable, align1, align2, match,
mismatch, gap, supergap)
opt = outputProgressiveAlign(align1, align2, backtrack)
return opt
def clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):
for i in range(len(dnaStrings)):
guideTree[i].alignment = [dnaStrings[i]]
for j in range(len(dnaStrings), len(guideTree)):
child1 = guideTree[j].child1
child2 = guideTree[j].child2
guideTree[j].alignment = progressiveAlign(child1.alignment, child2.
alignment, match, mismatch, gap, supergap)
return guideTree[len(guideTree) - 1].alignment
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# Evolutionary Trees contains algorithms and methods used in determining phylogenetic inheritance of various species.
# Main algos UPGMA and CLUSTALW
from dataclasses import dataclass
import FormattingET
@dataclass
class Node:
    """Node of the UPGMA guide tree.

    Fields are assigned after construction by the builders in this module:
    initializeTree sets `num`/`label`, upgma sets `age`/`child1`/`child2`,
    and clustalw sets `alignment`.
    """
    age: float  # half the merge distance at which this cluster formed (upgma assigns min/2, a float)
    num: int  # index of this node within the tree list
    label: str  # species name for leaves, "Ancestor species<num>" for internal nodes
    alignment: list  # rows of the multiple alignment for this subtree (was `[]`, a list *instance* used as an annotation)
    # NOTE(review): this manual __init__ replaces the dataclass-generated one,
    # so the annotated fields above are NOT initialized here; the generated
    # __repr__/__eq__ reference them and would raise until they are assigned —
    # confirm this is intended.
    def __init__(self, child1=None, child2=None):
        self.child1 = child1
        self.child2 = child2
#UPGMA algos
def initializeMatrix(m, n):
    """Return an m-by-n matrix of zeros built from independent row lists."""
    return [[0] * n for _ in range(m)]
def initializeClusters(t):
    """Return a new list holding the leaf nodes of tree list *t*.

    A tree list of N nodes has (N + 1) // 2 leaves stored at the front,
    so the leaves are simply the leading slice.
    """
    numLeaves = (len(t) + 1) // 2
    return t[:numLeaves]
def initializeTree(speciesNames):
    """Create the 2n-1 nodes of an unresolved UPGMA tree.

    The first n nodes are leaves labelled with *speciesNames*; the rest are
    internal "Ancestor species" placeholders. Every node records its index
    in `num`.
    """
    numLeaves = len(speciesNames)
    t = []
    for idx in range(2 * numLeaves - 1):
        node = Node()
        if idx < numLeaves:
            node.label = speciesNames[idx]
        else:
            node.label = "Ancestor species" + str(idx)
        node.num = idx
        t.append(node)
    return t
def countLeaves(v: Node):
    """Return the number of leaves in the subtree rooted at *v*.

    A node missing either child is treated as a leaf; otherwise the counts
    of both subtrees are summed recursively.
    """
    if v.child1 is not None and v.child2 is not None:
        return countLeaves(v.child1) + countLeaves(v.child2)
    return 1
def delClusters(clusters, row, col):
    """Remove the two merged cluster entries in place and return the list.

    Pops the higher index (*col* > *row*) first so the lower index is still
    valid afterwards.
    """
    clusters.pop(col)
    clusters.pop(row)
    return clusters
def findMinElement(mtx):
    """Return (row, col, value) of the smallest strictly-upper-triangle entry.

    Only cells with col > row are scanned, so the zero diagonal of a distance
    matrix never wins. Assumes *mtx* is at least 2x2.
    """
    best = (0, 1, mtx[0][1])
    size = len(mtx)
    for r in range(size):
        for c in range(r + 1, size):
            if mtx[r][c] < best[2]:
                best = (r, c, mtx[r][c])
    return best
def delRowCol(mtx, row, col):
    """Delete rows and columns *row* and *col* from a square matrix in place.

    The higher index (*col* > *row*) is removed first so the lower index is
    still correct after the shift. Returns the mutated matrix.
    """
    mtx.pop(col)
    mtx.pop(row)
    for remaining in mtx:
        remaining.pop(col)
        remaining.pop(row)
    return mtx
def addRowCol(mtx, clusters, row, col):
    """Append the distance row/column for the cluster formed by merging
    clusters[row] and clusters[col].

    Each new distance is the leaf-count-weighted average of the two old
    distances (standard UPGMA update). The entries facing the two merged
    clusters are left at 0; they are deleted by delRowCol right after.
    Mutates *mtx* in place and returns it.
    """
    # Hoist the loop-invariant subtree sizes: the original recomputed
    # countLeaves for both clusters on every iteration.
    size1 = countLeaves(clusters[row])
    size2 = countLeaves(clusters[col])
    total = size1 + size2
    newRow = [0] * (len(mtx) + 1)
    for i in range(len(newRow) - 1):
        if i != row and i != col:
            newRow[i] = (size1 * mtx[row][i] + size2 * mtx[i][col]) / total
    mtx.append(newRow)
    # Mirror the new row as a new column on every pre-existing row.
    for i in range(len(newRow) - 1):
        mtx[i].append(newRow[i])
    return mtx
def upgma(mtx, speciesNames):
    """Build a UPGMA tree from a symmetric distance matrix.

    Repeatedly merges the two closest clusters: the new internal node gets
    age = half the merge distance, adopts the two clusters as children, and
    the matrix/cluster lists are updated (weighted-average row added, merged
    rows/columns removed). Returns the full node list; the root is the last
    element. Note: *mtx* and the tree nodes are mutated.
    """
    tree = initializeTree(speciesNames)
    clusters = initializeClusters(tree)
    numLeaves = len(mtx)
    for i in range(numLeaves, 2 * numLeaves - 1):
        # Fix: unpack directly and avoid shadowing the builtin `min`.
        row, col, minDist = findMinElement(mtx)
        tree[i].age = minDist / 2
        tree[i].child1 = clusters[row]
        tree[i].child2 = clusters[col]
        mtx = addRowCol(mtx, clusters, row, col)
        clusters.append(tree[i])
        mtx = delRowCol(mtx, row, col)
        clusters = delClusters(clusters, row, col)
    return tree
#CLUSTALW algos
def sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):
    """Sum-of-pairs score between column *idx1* of alignment 1 and column
    *idx2* of alignment 2.

    For every character pair: gap-vs-gap contributes nothing, equal
    characters add *match*, two differing residues subtract *mismatch*,
    and a residue against a gap subtracts *gap*.
    """
    column1 = [seq[idx1] for seq in align1]
    column2 = [seq[idx2] for seq in align2]
    score = 0.0
    for c1 in column1:
        for c2 in column2:
            if c1 == '-' and c2 == '-':
                continue
            if c1 == c2:
                score += match
            elif c1 != '-' and c2 != '-':
                score -= mismatch
            else:
                score -= gap
    return score
def generateScoreTable(align1, align2, match, mismatch, gap, supergap):
    """Fill the Needleman-Wunsch-style DP table for aligning two profiles.

    Rows track positions of align1, columns of align2; the first row and
    column are gap ramps charged at *supergap* per step. Each interior cell
    takes the best of up/left (minus supergap) and diagonal plus the
    column-pair score.
    """
    numRows = len(align1[0]) + 1
    numCols = len(align2[0]) + 1
    scoreTable = [[0] * numCols for _ in range(numRows)]
    for r in range(numRows):
        scoreTable[r][0] = r * -supergap
    for c in range(numCols):
        scoreTable[0][c] = c * -supergap
    for r in range(1, numRows):
        for c in range(1, numCols):
            pair = sumPairScores(align1, align2, r - 1, c - 1, match, mismatch, gap)
            scoreTable[r][c] = max(scoreTable[r - 1][c] - supergap,
                                   scoreTable[r][c - 1] - supergap,
                                   scoreTable[r - 1][c - 1] + pair)
    return scoreTable
def progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap, supergap):
    """Recover the move table ('UP'/'LEFT'/'DIAG') from a filled DP table.

    The first row/column are forced LEFT/UP ramps. For interior cells the
    move is inferred by testing which predecessor (minus *supergap*)
    reproduces the cell's score, defaulting to DIAG. match/mismatch/gap are
    accepted to mirror generateScoreTable's signature but are not consulted.
    """
    numRows = len(align1[0]) + 1
    numCols = len(align2[0]) + 1
    backtrack = [[''] * numCols for _ in range(numRows)]
    for c in range(1, numCols):
        backtrack[0][c] = 'LEFT'
    for r in range(1, numRows):
        backtrack[r][0] = 'UP'
    for r in range(1, numRows):
        for c in range(1, numCols):
            cell = scoreTable[r][c]
            if cell == scoreTable[r - 1][c] - supergap:
                backtrack[r][c] = 'UP'
            elif cell == scoreTable[r][c - 1] - supergap:
                backtrack[r][c] = 'LEFT'
            else:
                backtrack[r][c] = 'DIAG'
    return backtrack
def backtracker(string, backtrack, orientation):
    """Replay the backtrack pointers from the bottom-right corner and return
    the gapped version of *string*.

    *orientation* says which axis the sequence lives on: 'side' sequences
    consume a character on UP moves and emit '-' on LEFT moves; 'top'
    sequences do the opposite. DIAG always consumes a character. The string
    is eaten from its tail while the aligned result is prepended.
    """
    aligned = ''
    row = len(backtrack) - 1
    col = len(backtrack[0]) - 1
    while row or col:
        move = backtrack[row][col]
        if move == 'UP':
            if orientation == 'top':
                aligned = '-' + aligned
            elif orientation == 'side':
                aligned = str(string[-1]) + aligned
                string = string[:-1]
            row -= 1
        elif move == 'LEFT':
            if orientation == 'side':
                aligned = '-' + aligned
            elif orientation == 'top':
                aligned = str(string[-1]) + aligned
                string = string[:-1]
            col -= 1
        else:
            aligned = str(string[-1]) + aligned
            string = string[:-1]
            row -= 1
            col -= 1
    return aligned
def outputProgressiveAlign(align1, align2, backtrack):
    """Apply the backtrack path to both child alignments and concatenate.

    Rows of *align1* are replayed as 'side' sequences, rows of *align2* as
    'top' sequences, so every output row carries the gaps dictated by the
    shared path. align1's rows come first in the result.
    """
    merged = [backtracker(seq, backtrack, 'side') for seq in align1]
    merged += [backtracker(seq, backtrack, 'top') for seq in align2]
    return merged
def progressiveAlign(align1, align2, match, mismatch, gap, supergap):
    """Align two existing alignments (profiles) into one.

    Pipeline: score the DP table, derive the move table, then thread every
    row of both profiles through the recovered path.
    """
    table = generateScoreTable(align1, align2, match, mismatch, gap, supergap)
    path = progressiveBacktrack(table, align1, align2, match, mismatch, gap, supergap)
    return outputProgressiveAlign(align1, align2, path)
def clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):
    """Progressive multiple alignment guided by a UPGMA tree.

    Seeds each leaf with its single sequence, then walks the internal nodes
    in construction order, merging the two child alignments at every step.
    The root's alignment (last node) is the final result.
    """
    numStrings = len(dnaStrings)
    for idx in range(numStrings):
        guideTree[idx].alignment = [dnaStrings[idx]]
    for idx in range(numStrings, len(guideTree)):
        node = guideTree[idx]
        node.alignment = progressiveAlign(node.child1.alignment,
                                          node.child2.alignment,
                                          match, mismatch, gap, supergap)
    return guideTree[-1].alignment
#main
if __name__ == "__main__":
    # Smoke test 1: UPGMA on a small hand-built symmetric distance matrix.
    print("UPGMA Test")
    mtx = [[0, 3, 4, 3], [3, 0, 4, 5], [4, 4, 0, 2], [3, 5, 2, 0]]
    labels = ["H", "C", "W", "S"]
    tree = upgma(mtx, labels)
    # Smoke test 2: full CLUSTALW pipeline driven by files on disk
    # via the project-local FormattingET helpers.
    print("CLUSTALW Test")
    #cats = ["USA", "CHN", "ITA"]
    mtxreturn = FormattingET.readMatrixFromFile("Datasets/Input/Test-Example/distance.mtx")
    mtx1 = mtxreturn[0]  # distance matrix
    labels1 = mtxreturn[1]  # species labels, in matrix order
    t = upgma(mtx1, labels1)  # guide tree for the progressive alignment
    # Scoring parameters: match reward, mismatch/gap penalties, and the
    # larger "supergap" charged by the profile-level DP.
    match = 1.0
    mismatch = 1.0
    gap = 1.0
    supergap = 6.0
    dnaMap = FormattingET.readDNAStringsFromFile("Datasets/Input/Test-Example/RAW/toy-example.fasta")
    keyvalues = FormattingET.getKeyValues(dnaMap)
    newLabels = keyvalues[0]
    newDnaStrings = keyvalues[1]
    # Reorder the FASTA strings so they match the label order of the matrix/tree.
    dnaStrings = FormattingET.rearrangeStrings(labels1, newLabels, newDnaStrings)
    align = clustalw(t, dnaStrings, match, mismatch, gap, supergap)
    FormattingET.writeAlignmentToFile(align, labels1, "Datasets/Output/Test-Example", "toy.aln")
    print(align)
|
flexible
|
{
"blob_id": "53cf2dfe3319c39ca6f1dc890eea578fae654b5b",
"index": 8847,
"step-1": "<mask token>\n\n\n@dataclass\nclass Node:\n age: int\n num: int\n label: str\n alignment: []\n\n def __init__(self, child1=None, child2=None):\n self.child1 = child1\n self.child2 = child2\n\n\n<mask token>\n\n\ndef initializeClusters(t):\n numNodes = len(t)\n numLeaves = (numNodes + 1) / 2\n clusters = [0] * int(numLeaves)\n for i in range(int(numLeaves)):\n clusters[i] = t[i]\n return clusters\n\n\n<mask token>\n\n\ndef upgma(mtx, speciesNames):\n tree = initializeTree(speciesNames)\n clusters = initializeClusters(tree)\n numLeaves = len(mtx)\n for i in range(numLeaves, 2 * numLeaves - 1):\n minElements = findMinElement(mtx)\n row = minElements[0]\n col = minElements[1]\n min = minElements[2]\n tree[i].age = min / 2\n tree[i].child1 = clusters[row]\n tree[i].child2 = clusters[col]\n mtx = addRowCol(mtx, clusters, row, col)\n clusters.append(tree[i])\n mtx = delRowCol(mtx, row, col)\n clusters = delClusters(clusters, row, col)\n return tree\n\n\ndef sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):\n alignment1 = [''] * len(align1)\n for i in range(len(align1)):\n alignment1[i] = align1[i][idx1]\n alignment2 = [''] * len(align2)\n for i in range(len(align2)):\n alignment2[i] = align2[i][idx2]\n score = 0.0\n for char in alignment1:\n for char2 in alignment2:\n if char == '-' and char2 == '-':\n continue\n elif char == char2:\n score += match\n elif char != '-' and char2 != '-':\n score -= mismatch\n else:\n score -= gap\n return score\n\n\n<mask token>\n\n\ndef progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,\n supergap):\n numRows = len(align1[0]) + 1\n numCols = len(align2[0]) + 1\n backtrack = [['' for i in range(numCols)] for j in range(numRows)]\n for i in range(1, numCols):\n backtrack[0][i] = 'LEFT'\n for i in range(1, numRows):\n backtrack[i][0] = 'UP'\n for i in range(1, numRows):\n for j in range(1, numCols):\n if scoreTable[i][j] == scoreTable[i - 1][j] - supergap:\n backtrack[i][j] = 'UP'\n elif 
scoreTable[i][j] == scoreTable[i][j - 1] - supergap:\n backtrack[i][j] = 'LEFT'\n else:\n backtrack[i][j] = 'DIAG'\n return backtrack\n\n\ndef backtracker(string, backtrack, orientation):\n aligned = ''\n row = len(backtrack) - 1\n col = len(backtrack[0]) - 1\n while row != 0 or col != 0:\n k = len(string)\n if backtrack[row][col] == 'UP':\n if orientation == 'top':\n aligned = '-' + aligned\n elif orientation == 'side':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n elif backtrack[row][col] == 'LEFT':\n if orientation == 'side':\n aligned = '-' + aligned\n elif orientation == 'top':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n col -= 1\n else:\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n col -= 1\n return aligned\n\n\ndef outputProgressiveAlign(align1, align2, backtrack):\n a = [[''] for i in range(len(align1) + len(align2))]\n for i in range(len(align1)):\n a[i] = backtracker(align1[i], backtrack, 'side')\n for j in range(len(align1), len(align2) + len(align1)):\n a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')\n return a\n\n\n<mask token>\n\n\ndef clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):\n for i in range(len(dnaStrings)):\n guideTree[i].alignment = [dnaStrings[i]]\n for j in range(len(dnaStrings), len(guideTree)):\n child1 = guideTree[j].child1\n child2 = guideTree[j].child2\n guideTree[j].alignment = progressiveAlign(child1.alignment, child2.\n alignment, match, mismatch, gap, supergap)\n return guideTree[len(guideTree) - 1].alignment\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@dataclass\nclass Node:\n age: int\n num: int\n label: str\n alignment: []\n\n def __init__(self, child1=None, child2=None):\n self.child1 = child1\n self.child2 = child2\n\n\n<mask token>\n\n\ndef initializeClusters(t):\n numNodes = len(t)\n numLeaves = (numNodes + 1) / 2\n clusters = [0] * int(numLeaves)\n for i in range(int(numLeaves)):\n clusters[i] = t[i]\n return clusters\n\n\n<mask token>\n\n\ndef countLeaves(v: Node):\n if v.child1 is None or v.child2 is None:\n return 1\n return countLeaves(v.child1) + countLeaves(v.child2)\n\n\n<mask token>\n\n\ndef findMinElement(mtx):\n minRow = 0\n minCol = 1\n minElement = mtx[0][1]\n for row in range(0, len(mtx)):\n for col in range(row + 1, len(mtx)):\n if mtx[row][col] < minElement:\n minRow = row\n minCol = col\n minElement = mtx[row][col]\n return minRow, minCol, minElement\n\n\n<mask token>\n\n\ndef upgma(mtx, speciesNames):\n tree = initializeTree(speciesNames)\n clusters = initializeClusters(tree)\n numLeaves = len(mtx)\n for i in range(numLeaves, 2 * numLeaves - 1):\n minElements = findMinElement(mtx)\n row = minElements[0]\n col = minElements[1]\n min = minElements[2]\n tree[i].age = min / 2\n tree[i].child1 = clusters[row]\n tree[i].child2 = clusters[col]\n mtx = addRowCol(mtx, clusters, row, col)\n clusters.append(tree[i])\n mtx = delRowCol(mtx, row, col)\n clusters = delClusters(clusters, row, col)\n return tree\n\n\ndef sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):\n alignment1 = [''] * len(align1)\n for i in range(len(align1)):\n alignment1[i] = align1[i][idx1]\n alignment2 = [''] * len(align2)\n for i in range(len(align2)):\n alignment2[i] = align2[i][idx2]\n score = 0.0\n for char in alignment1:\n for char2 in alignment2:\n if char == '-' and char2 == '-':\n continue\n elif char == char2:\n score += match\n elif char != '-' and char2 != '-':\n score -= mismatch\n else:\n score -= gap\n return score\n\n\ndef generateScoreTable(align1, align2, match, 
mismatch, gap, supergap):\n scoreTable = [[(0) for j in range(len(align2[0]) + 1)] for i in range(\n len(align1[0]) + 1)]\n for i in range(len(scoreTable)):\n scoreTable[i][0] = i * -supergap\n for i in range(len(scoreTable[0])):\n scoreTable[0][i] = i * -supergap\n for i in range(1, len(align1[0]) + 1):\n for j in range(1, len(align2[0]) + 1):\n up = scoreTable[i - 1][j] - supergap\n left = scoreTable[i][j - 1] - supergap\n diag = scoreTable[i - 1][j - 1] + sumPairScores(align1, align2,\n i - 1, j - 1, match, mismatch, gap)\n scoreTable[i][j] = max(up, left, diag)\n return scoreTable\n\n\ndef progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,\n supergap):\n numRows = len(align1[0]) + 1\n numCols = len(align2[0]) + 1\n backtrack = [['' for i in range(numCols)] for j in range(numRows)]\n for i in range(1, numCols):\n backtrack[0][i] = 'LEFT'\n for i in range(1, numRows):\n backtrack[i][0] = 'UP'\n for i in range(1, numRows):\n for j in range(1, numCols):\n if scoreTable[i][j] == scoreTable[i - 1][j] - supergap:\n backtrack[i][j] = 'UP'\n elif scoreTable[i][j] == scoreTable[i][j - 1] - supergap:\n backtrack[i][j] = 'LEFT'\n else:\n backtrack[i][j] = 'DIAG'\n return backtrack\n\n\ndef backtracker(string, backtrack, orientation):\n aligned = ''\n row = len(backtrack) - 1\n col = len(backtrack[0]) - 1\n while row != 0 or col != 0:\n k = len(string)\n if backtrack[row][col] == 'UP':\n if orientation == 'top':\n aligned = '-' + aligned\n elif orientation == 'side':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n elif backtrack[row][col] == 'LEFT':\n if orientation == 'side':\n aligned = '-' + aligned\n elif orientation == 'top':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n col -= 1\n else:\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n col -= 1\n return aligned\n\n\ndef outputProgressiveAlign(align1, align2, backtrack):\n a = [[''] for i in range(len(align1) + 
len(align2))]\n for i in range(len(align1)):\n a[i] = backtracker(align1[i], backtrack, 'side')\n for j in range(len(align1), len(align2) + len(align1)):\n a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')\n return a\n\n\n<mask token>\n\n\ndef clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):\n for i in range(len(dnaStrings)):\n guideTree[i].alignment = [dnaStrings[i]]\n for j in range(len(dnaStrings), len(guideTree)):\n child1 = guideTree[j].child1\n child2 = guideTree[j].child2\n guideTree[j].alignment = progressiveAlign(child1.alignment, child2.\n alignment, match, mismatch, gap, supergap)\n return guideTree[len(guideTree) - 1].alignment\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@dataclass\nclass Node:\n age: int\n num: int\n label: str\n alignment: []\n\n def __init__(self, child1=None, child2=None):\n self.child1 = child1\n self.child2 = child2\n\n\ndef initializeMatrix(m, n):\n mtx = [[(0) for x in range(n)] for y in range(m)]\n return mtx\n\n\ndef initializeClusters(t):\n numNodes = len(t)\n numLeaves = (numNodes + 1) / 2\n clusters = [0] * int(numLeaves)\n for i in range(int(numLeaves)):\n clusters[i] = t[i]\n return clusters\n\n\ndef initializeTree(speciesNames):\n numLeaves = len(speciesNames)\n t = [Node] * (2 * numLeaves - 1)\n for i in range(len(t)):\n vx = Node()\n if i < numLeaves:\n vx.label = speciesNames[i]\n else:\n vx.label = 'Ancestor species' + str(i)\n vx.num = i\n t[i] = vx\n return t\n\n\ndef countLeaves(v: Node):\n if v.child1 is None or v.child2 is None:\n return 1\n return countLeaves(v.child1) + countLeaves(v.child2)\n\n\n<mask token>\n\n\ndef findMinElement(mtx):\n minRow = 0\n minCol = 1\n minElement = mtx[0][1]\n for row in range(0, len(mtx)):\n for col in range(row + 1, len(mtx)):\n if mtx[row][col] < minElement:\n minRow = row\n minCol = col\n minElement = mtx[row][col]\n return minRow, minCol, minElement\n\n\n<mask token>\n\n\ndef addRowCol(mtx, clusters, row, col):\n newRow = [0] * (len(mtx) + 1)\n for i in range(len(newRow) - 1):\n if i != row and i != col:\n size1 = countLeaves(clusters[row])\n size2 = countLeaves(clusters[col])\n avg = (size1 * mtx[row][i] + size2 * mtx[i][col]) / (size1 + size2)\n newRow[i] = avg\n mtx.append(newRow)\n for i in range(len(newRow) - 1):\n mtx[i].append(newRow[i])\n return mtx\n\n\ndef upgma(mtx, speciesNames):\n tree = initializeTree(speciesNames)\n clusters = initializeClusters(tree)\n numLeaves = len(mtx)\n for i in range(numLeaves, 2 * numLeaves - 1):\n minElements = findMinElement(mtx)\n row = minElements[0]\n col = minElements[1]\n min = minElements[2]\n tree[i].age = min / 2\n tree[i].child1 = clusters[row]\n tree[i].child2 = 
clusters[col]\n mtx = addRowCol(mtx, clusters, row, col)\n clusters.append(tree[i])\n mtx = delRowCol(mtx, row, col)\n clusters = delClusters(clusters, row, col)\n return tree\n\n\ndef sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):\n alignment1 = [''] * len(align1)\n for i in range(len(align1)):\n alignment1[i] = align1[i][idx1]\n alignment2 = [''] * len(align2)\n for i in range(len(align2)):\n alignment2[i] = align2[i][idx2]\n score = 0.0\n for char in alignment1:\n for char2 in alignment2:\n if char == '-' and char2 == '-':\n continue\n elif char == char2:\n score += match\n elif char != '-' and char2 != '-':\n score -= mismatch\n else:\n score -= gap\n return score\n\n\ndef generateScoreTable(align1, align2, match, mismatch, gap, supergap):\n scoreTable = [[(0) for j in range(len(align2[0]) + 1)] for i in range(\n len(align1[0]) + 1)]\n for i in range(len(scoreTable)):\n scoreTable[i][0] = i * -supergap\n for i in range(len(scoreTable[0])):\n scoreTable[0][i] = i * -supergap\n for i in range(1, len(align1[0]) + 1):\n for j in range(1, len(align2[0]) + 1):\n up = scoreTable[i - 1][j] - supergap\n left = scoreTable[i][j - 1] - supergap\n diag = scoreTable[i - 1][j - 1] + sumPairScores(align1, align2,\n i - 1, j - 1, match, mismatch, gap)\n scoreTable[i][j] = max(up, left, diag)\n return scoreTable\n\n\ndef progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,\n supergap):\n numRows = len(align1[0]) + 1\n numCols = len(align2[0]) + 1\n backtrack = [['' for i in range(numCols)] for j in range(numRows)]\n for i in range(1, numCols):\n backtrack[0][i] = 'LEFT'\n for i in range(1, numRows):\n backtrack[i][0] = 'UP'\n for i in range(1, numRows):\n for j in range(1, numCols):\n if scoreTable[i][j] == scoreTable[i - 1][j] - supergap:\n backtrack[i][j] = 'UP'\n elif scoreTable[i][j] == scoreTable[i][j - 1] - supergap:\n backtrack[i][j] = 'LEFT'\n else:\n backtrack[i][j] = 'DIAG'\n return backtrack\n\n\ndef backtracker(string, backtrack, 
orientation):\n aligned = ''\n row = len(backtrack) - 1\n col = len(backtrack[0]) - 1\n while row != 0 or col != 0:\n k = len(string)\n if backtrack[row][col] == 'UP':\n if orientation == 'top':\n aligned = '-' + aligned\n elif orientation == 'side':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n elif backtrack[row][col] == 'LEFT':\n if orientation == 'side':\n aligned = '-' + aligned\n elif orientation == 'top':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n col -= 1\n else:\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n col -= 1\n return aligned\n\n\ndef outputProgressiveAlign(align1, align2, backtrack):\n a = [[''] for i in range(len(align1) + len(align2))]\n for i in range(len(align1)):\n a[i] = backtracker(align1[i], backtrack, 'side')\n for j in range(len(align1), len(align2) + len(align1)):\n a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')\n return a\n\n\ndef progressiveAlign(align1, align2, match, mismatch, gap, supergap):\n scoreTable = generateScoreTable(align1, align2, match, mismatch, gap,\n supergap)\n backtrack = progressiveBacktrack(scoreTable, align1, align2, match,\n mismatch, gap, supergap)\n opt = outputProgressiveAlign(align1, align2, backtrack)\n return opt\n\n\ndef clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):\n for i in range(len(dnaStrings)):\n guideTree[i].alignment = [dnaStrings[i]]\n for j in range(len(dnaStrings), len(guideTree)):\n child1 = guideTree[j].child1\n child2 = guideTree[j].child2\n guideTree[j].alignment = progressiveAlign(child1.alignment, child2.\n alignment, match, mismatch, gap, supergap)\n return guideTree[len(guideTree) - 1].alignment\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\n@dataclass\nclass Node:\n age: int\n num: int\n label: str\n alignment: []\n\n def __init__(self, child1=None, child2=None):\n self.child1 = child1\n self.child2 = child2\n\n\ndef initializeMatrix(m, n):\n mtx = [[(0) for x in range(n)] for y in range(m)]\n return mtx\n\n\ndef initializeClusters(t):\n numNodes = len(t)\n numLeaves = (numNodes + 1) / 2\n clusters = [0] * int(numLeaves)\n for i in range(int(numLeaves)):\n clusters[i] = t[i]\n return clusters\n\n\ndef initializeTree(speciesNames):\n numLeaves = len(speciesNames)\n t = [Node] * (2 * numLeaves - 1)\n for i in range(len(t)):\n vx = Node()\n if i < numLeaves:\n vx.label = speciesNames[i]\n else:\n vx.label = 'Ancestor species' + str(i)\n vx.num = i\n t[i] = vx\n return t\n\n\ndef countLeaves(v: Node):\n if v.child1 is None or v.child2 is None:\n return 1\n return countLeaves(v.child1) + countLeaves(v.child2)\n\n\ndef delClusters(clusters, row, col):\n del clusters[col]\n del clusters[row]\n return clusters\n\n\ndef findMinElement(mtx):\n minRow = 0\n minCol = 1\n minElement = mtx[0][1]\n for row in range(0, len(mtx)):\n for col in range(row + 1, len(mtx)):\n if mtx[row][col] < minElement:\n minRow = row\n minCol = col\n minElement = mtx[row][col]\n return minRow, minCol, minElement\n\n\n<mask token>\n\n\ndef addRowCol(mtx, clusters, row, col):\n newRow = [0] * (len(mtx) + 1)\n for i in range(len(newRow) - 1):\n if i != row and i != col:\n size1 = countLeaves(clusters[row])\n size2 = countLeaves(clusters[col])\n avg = (size1 * mtx[row][i] + size2 * mtx[i][col]) / (size1 + size2)\n newRow[i] = avg\n mtx.append(newRow)\n for i in range(len(newRow) - 1):\n mtx[i].append(newRow[i])\n return mtx\n\n\ndef upgma(mtx, speciesNames):\n tree = initializeTree(speciesNames)\n clusters = initializeClusters(tree)\n numLeaves = len(mtx)\n for i in range(numLeaves, 2 * numLeaves - 1):\n minElements = findMinElement(mtx)\n row = minElements[0]\n col = minElements[1]\n min = minElements[2]\n 
tree[i].age = min / 2\n tree[i].child1 = clusters[row]\n tree[i].child2 = clusters[col]\n mtx = addRowCol(mtx, clusters, row, col)\n clusters.append(tree[i])\n mtx = delRowCol(mtx, row, col)\n clusters = delClusters(clusters, row, col)\n return tree\n\n\ndef sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):\n alignment1 = [''] * len(align1)\n for i in range(len(align1)):\n alignment1[i] = align1[i][idx1]\n alignment2 = [''] * len(align2)\n for i in range(len(align2)):\n alignment2[i] = align2[i][idx2]\n score = 0.0\n for char in alignment1:\n for char2 in alignment2:\n if char == '-' and char2 == '-':\n continue\n elif char == char2:\n score += match\n elif char != '-' and char2 != '-':\n score -= mismatch\n else:\n score -= gap\n return score\n\n\ndef generateScoreTable(align1, align2, match, mismatch, gap, supergap):\n scoreTable = [[(0) for j in range(len(align2[0]) + 1)] for i in range(\n len(align1[0]) + 1)]\n for i in range(len(scoreTable)):\n scoreTable[i][0] = i * -supergap\n for i in range(len(scoreTable[0])):\n scoreTable[0][i] = i * -supergap\n for i in range(1, len(align1[0]) + 1):\n for j in range(1, len(align2[0]) + 1):\n up = scoreTable[i - 1][j] - supergap\n left = scoreTable[i][j - 1] - supergap\n diag = scoreTable[i - 1][j - 1] + sumPairScores(align1, align2,\n i - 1, j - 1, match, mismatch, gap)\n scoreTable[i][j] = max(up, left, diag)\n return scoreTable\n\n\ndef progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,\n supergap):\n numRows = len(align1[0]) + 1\n numCols = len(align2[0]) + 1\n backtrack = [['' for i in range(numCols)] for j in range(numRows)]\n for i in range(1, numCols):\n backtrack[0][i] = 'LEFT'\n for i in range(1, numRows):\n backtrack[i][0] = 'UP'\n for i in range(1, numRows):\n for j in range(1, numCols):\n if scoreTable[i][j] == scoreTable[i - 1][j] - supergap:\n backtrack[i][j] = 'UP'\n elif scoreTable[i][j] == scoreTable[i][j - 1] - supergap:\n backtrack[i][j] = 'LEFT'\n else:\n 
backtrack[i][j] = 'DIAG'\n return backtrack\n\n\ndef backtracker(string, backtrack, orientation):\n aligned = ''\n row = len(backtrack) - 1\n col = len(backtrack[0]) - 1\n while row != 0 or col != 0:\n k = len(string)\n if backtrack[row][col] == 'UP':\n if orientation == 'top':\n aligned = '-' + aligned\n elif orientation == 'side':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n elif backtrack[row][col] == 'LEFT':\n if orientation == 'side':\n aligned = '-' + aligned\n elif orientation == 'top':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n col -= 1\n else:\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n col -= 1\n return aligned\n\n\ndef outputProgressiveAlign(align1, align2, backtrack):\n a = [[''] for i in range(len(align1) + len(align2))]\n for i in range(len(align1)):\n a[i] = backtracker(align1[i], backtrack, 'side')\n for j in range(len(align1), len(align2) + len(align1)):\n a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')\n return a\n\n\ndef progressiveAlign(align1, align2, match, mismatch, gap, supergap):\n scoreTable = generateScoreTable(align1, align2, match, mismatch, gap,\n supergap)\n backtrack = progressiveBacktrack(scoreTable, align1, align2, match,\n mismatch, gap, supergap)\n opt = outputProgressiveAlign(align1, align2, backtrack)\n return opt\n\n\ndef clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):\n for i in range(len(dnaStrings)):\n guideTree[i].alignment = [dnaStrings[i]]\n for j in range(len(dnaStrings), len(guideTree)):\n child1 = guideTree[j].child1\n child2 = guideTree[j].child2\n guideTree[j].alignment = progressiveAlign(child1.alignment, child2.\n alignment, match, mismatch, gap, supergap)\n return guideTree[len(guideTree) - 1].alignment\n\n\n<mask token>\n",
"step-5": "# Evolutionary Trees contains algorithms and methods used in determining phylogenetic inheritance of various species.\n# Main algos UPGMA and CLUSTALW\nfrom dataclasses import dataclass\nimport FormattingET\n\n@dataclass\nclass Node:\n age: int\n num: int\n label: str\n alignment: []\n def __init__(self, child1=None, child2=None):\n self.child1 = child1\n self.child2 = child2\n\n#UPGMA algos\n\ndef initializeMatrix(m, n):\n mtx = [[0 for x in range(n)] for y in range(m)]\n return mtx\n\ndef initializeClusters(t):\n numNodes = len(t)\n numLeaves = (numNodes + 1) / 2\n clusters = [0]*int(numLeaves)\n\n for i in range(int(numLeaves)):\n clusters[i] = t[i]\n\n return clusters\n\ndef initializeTree(speciesNames):\n numLeaves = len(speciesNames)\n\n t = [Node]*(2*numLeaves - 1)\n\n for i in range(len(t)):\n vx = Node()\n\n if i < numLeaves:\n vx.label = speciesNames[i]\n else:\n vx.label = \"Ancestor species\" + str(i)\n vx.num = i\n t[i] = vx\n\n return t\n\ndef countLeaves(v: Node):\n if v.child1 is None or v.child2 is None:\n return 1\n\n return countLeaves(v.child1) + countLeaves(v.child2)\n\ndef delClusters(clusters, row, col):\n del clusters[col]\n del clusters[row]\n return clusters\n\ndef findMinElement(mtx):\n minRow = 0\n minCol = 1\n minElement = mtx[0][1]\n for row in range(0, len(mtx)):\n for col in range(row+1, len(mtx)):\n if mtx[row][col] < minElement:\n minRow = row\n minCol = col\n minElement = mtx[row][col]\n\n return minRow, minCol, minElement\n\ndef delRowCol(mtx, row, col):\n del mtx[col]\n del mtx[row]\n\n for i in range(len(mtx)):\n del mtx[i][col]\n del mtx[i][row]\n\n return mtx\n\ndef addRowCol(mtx, clusters, row, col):\n newRow = [0]*(len(mtx) + 1)\n\n for i in range(len(newRow) - 1):\n if i != row and i != col:\n size1 = countLeaves(clusters[row])\n size2 = countLeaves(clusters[col])\n avg = (size1*mtx[row][i] + size2*mtx[i][col]) / (size1 + size2)\n newRow[i] = avg\n\n mtx.append(newRow)\n\n for i in range(len(newRow) - 1):\n 
mtx[i].append(newRow[i])\n\n return mtx\n\ndef upgma(mtx, speciesNames):\n tree = initializeTree(speciesNames)\n clusters = initializeClusters(tree)\n numLeaves = len(mtx)\n\n for i in range(numLeaves, 2*numLeaves - 1):\n minElements = findMinElement(mtx)\n row = minElements[0]\n col = minElements[1]\n min = minElements[2]\n\n tree[i].age = min/2\n tree[i].child1 = clusters[row]\n tree[i].child2 = clusters[col]\n\n mtx = addRowCol(mtx, clusters, row, col)\n clusters.append(tree[i])\n mtx = delRowCol(mtx, row, col)\n\n clusters = delClusters(clusters, row, col)\n\n return tree\n\n#CLUSTALW algos\n\ndef sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):\n alignment1 = ['']*len(align1)\n for i in range(len(align1)):\n alignment1[i] = align1[i][idx1]\n\n alignment2 = [''] * len(align2)\n for i in range(len(align2)):\n alignment2[i] = align2[i][idx2]\n\n score = 0.0\n\n for char in alignment1:\n for char2 in alignment2:\n if char == '-' and char2 == '-':\n continue\n elif char == char2:\n score += match\n elif char != '-' and char2 != '-':\n score -= mismatch\n else:\n score -= gap\n\n return score\n\ndef generateScoreTable(align1, align2, match, mismatch, gap, supergap):\n scoreTable = [[0 for j in range(len(align2[0]) + 1)] for i in range(len(align1[0]) + 1)]\n\n for i in range(len(scoreTable)):\n scoreTable[i][0] = i * (-supergap)\n for i in range(len(scoreTable[0])):\n scoreTable[0][i] = i * (-supergap)\n\n for i in range(1, len(align1[0]) + 1):\n for j in range(1, len(align2[0]) + 1):\n\n up = scoreTable[i-1][j] - supergap\n left = scoreTable[i][j-1] - supergap\n diag = scoreTable[i-1][j-1] + sumPairScores(align1, align2, i-1, j-1, match, mismatch, gap)\n\n scoreTable[i][j] = max(up, left, diag)\n\n return scoreTable\n\ndef progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap, supergap):\n numRows = len(align1[0]) + 1\n numCols = len(align2[0]) + 1\n\n backtrack = [['' for i in range(numCols)] for j in range(numRows)]\n\n for i in 
range(1, numCols):\n backtrack[0][i] = \"LEFT\"\n for i in range(1, numRows):\n backtrack[i][0] = \"UP\"\n\n for i in range(1, numRows):\n for j in range(1, numCols):\n if (scoreTable[i][j] == scoreTable[i-1][j] - supergap):\n backtrack[i][j] = \"UP\"\n elif scoreTable[i][j] == scoreTable[i][j-1] - supergap:\n backtrack[i][j] = \"LEFT\"\n else:\n backtrack[i][j] = \"DIAG\"\n\n return backtrack\n\ndef backtracker(string, backtrack, orientation):\n aligned = \"\"\n\n row = len(backtrack) - 1\n col = len(backtrack[0]) - 1\n\n while(row != 0 or col != 0):\n k = len(string)\n\n if backtrack[row][col] == \"UP\":\n if (orientation == \"top\"):\n aligned = \"-\" + aligned\n elif orientation == \"side\":\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n elif backtrack[row][col] == \"LEFT\":\n if (orientation == \"side\"):\n aligned = \"-\" + aligned\n elif orientation == \"top\":\n aligned = str(string[k-1]) + aligned\n string = string[:k-1]\n col -= 1\n else:\n aligned = str(string[k-1]) + aligned\n string = string[:k-1]\n row -= 1\n col -= 1\n\n return aligned\n\ndef outputProgressiveAlign(align1, align2, backtrack):\n a = [[\"\"] for i in range(len(align1) + len(align2))]\n\n for i in range(len(align1)):\n a[i] = backtracker(align1[i], backtrack, \"side\")\n for j in range(len(align1), len(align2) + len(align1)):\n a[j] = backtracker(align2[j - len(align1)], backtrack, \"top\")\n\n return a\n\ndef progressiveAlign(align1, align2, match, mismatch, gap, supergap):\n scoreTable = generateScoreTable(align1, align2, match, mismatch, gap, supergap)\n backtrack = progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap, supergap)\n opt = outputProgressiveAlign(align1, align2, backtrack)\n\n return opt\n\ndef clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):\n\n for i in range(len(dnaStrings)):\n guideTree[i].alignment = [dnaStrings[i]]\n\n for j in range(len(dnaStrings), len(guideTree)):\n child1 = guideTree[j].child1\n 
child2 = guideTree[j].child2\n\n guideTree[j].alignment = progressiveAlign(child1.alignment, child2.alignment, match, mismatch, gap, supergap)\n\n return guideTree[len(guideTree) - 1].alignment\n\n\n#main\nif __name__ == \"__main__\":\n print(\"UPGMA Test\")\n mtx = [[0, 3, 4, 3], [3, 0, 4, 5], [4, 4, 0, 2], [3, 5, 2, 0]]\n labels = [\"H\", \"C\", \"W\", \"S\"]\n tree = upgma(mtx, labels)\n\n print(\"CLUSTALW Test\")\n \n #cats = [\"USA\", \"CHN\", \"ITA\"]\n\n mtxreturn = FormattingET.readMatrixFromFile(\"Datasets/Input/Test-Example/distance.mtx\")\n mtx1 = mtxreturn[0]\n labels1 = mtxreturn[1]\n\n t = upgma(mtx1, labels1)\n\n match = 1.0\n mismatch = 1.0\n gap = 1.0\n supergap = 6.0\n \n dnaMap = FormattingET.readDNAStringsFromFile(\"Datasets/Input/Test-Example/RAW/toy-example.fasta\")\n keyvalues = FormattingET.getKeyValues(dnaMap)\n newLabels = keyvalues[0]\n newDnaStrings = keyvalues[1]\n\n dnaStrings = FormattingET.rearrangeStrings(labels1, newLabels, newDnaStrings)\n align = clustalw(t, dnaStrings, match, mismatch, gap, supergap)\n FormattingET.writeAlignmentToFile(align, labels1, \"Datasets/Output/Test-Example\", \"toy.aln\")\n print(align)\n ",
"step-ids": [
9,
12,
16,
17,
21
]
}
|
[
9,
12,
16,
17,
21
] |
class Animal:
def eat(self):
print('吃')
def bark(self):
print('喝')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Dog(Animal):
def bark(self):
print('汪汪叫')
class XiaoTianQuan(Dog):
def bark(self):
print('像神一样的叫唤...')
Dog.bark(self)
print('$%^*%^#%$%')
def fly(self):
print('我会飞')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Animal:
def eat(self):
print('吃')
def bark(self):
print('喝')
def run(seft):
print('跑')
<|reserved_special_token_0|>
class Dog(Animal):
def bark(self):
print('汪汪叫')
class XiaoTianQuan(Dog):
def bark(self):
print('像神一样的叫唤...')
Dog.bark(self)
print('$%^*%^#%$%')
def fly(self):
print('我会飞')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Animal:
def eat(self):
print('吃')
def bark(self):
print('喝')
def run(seft):
print('跑')
def sleep(self):
print('睡')
class Dog(Animal):
def bark(self):
print('汪汪叫')
class XiaoTianQuan(Dog):
def bark(self):
print('像神一样的叫唤...')
Dog.bark(self)
print('$%^*%^#%$%')
def fly(self):
print('我会飞')
<|reserved_special_token_0|>
xtq.bark()
<|reserved_special_token_1|>
class Animal:
def eat(self):
print('吃')
def bark(self):
print('喝')
def run(seft):
print('跑')
def sleep(self):
print('睡')
class Dog(Animal):
def bark(self):
print('汪汪叫')
class XiaoTianQuan(Dog):
def bark(self):
print('像神一样的叫唤...')
Dog.bark(self)
print('$%^*%^#%$%')
def fly(self):
print('我会飞')
xtq = XiaoTianQuan()
xtq.bark()
<|reserved_special_token_1|>
#! /usr/bin/python3
class Animal:
def eat(self):
print("吃")
def bark(self):
print("喝")
def run(seft):
print("跑")
def sleep(self):
print("睡")
class Dog(Animal):
# 子类拥有父类的所有属性和方法
def bark(self):
print("汪汪叫")
class XiaoTianQuan(Dog): # 3. 增加其他子类代码
def bark(self):
# 1. 针对子类特有的需求, 编写代码
print("像神一样的叫唤...")
# 2. 使用super(). 调用原来在父类中封装的方法
# super().bark()
# 注意: 如果使用子类调用方法, 会出现递归调用 - 死循环
# 父类名.方法(self)
Dog.bark(self)
# 3. 增加其他子类代码
print("$%^*%^#%$%")
def fly(self):
print("我会飞")
xtq = XiaoTianQuan()
xtq.bark()
|
flexible
|
{
"blob_id": "d7aa85c2458ee12a8de0f75419945fbe2acdf95d",
"index": 3946,
"step-1": "class Animal:\n\n def eat(self):\n print('吃')\n\n def bark(self):\n print('喝')\n <mask token>\n <mask token>\n\n\nclass Dog(Animal):\n\n def bark(self):\n print('汪汪叫')\n\n\nclass XiaoTianQuan(Dog):\n\n def bark(self):\n print('像神一样的叫唤...')\n Dog.bark(self)\n print('$%^*%^#%$%')\n\n def fly(self):\n print('我会飞')\n\n\n<mask token>\n",
"step-2": "class Animal:\n\n def eat(self):\n print('吃')\n\n def bark(self):\n print('喝')\n\n def run(seft):\n print('跑')\n <mask token>\n\n\nclass Dog(Animal):\n\n def bark(self):\n print('汪汪叫')\n\n\nclass XiaoTianQuan(Dog):\n\n def bark(self):\n print('像神一样的叫唤...')\n Dog.bark(self)\n print('$%^*%^#%$%')\n\n def fly(self):\n print('我会飞')\n\n\n<mask token>\n",
"step-3": "class Animal:\n\n def eat(self):\n print('吃')\n\n def bark(self):\n print('喝')\n\n def run(seft):\n print('跑')\n\n def sleep(self):\n print('睡')\n\n\nclass Dog(Animal):\n\n def bark(self):\n print('汪汪叫')\n\n\nclass XiaoTianQuan(Dog):\n\n def bark(self):\n print('像神一样的叫唤...')\n Dog.bark(self)\n print('$%^*%^#%$%')\n\n def fly(self):\n print('我会飞')\n\n\n<mask token>\nxtq.bark()\n",
"step-4": "class Animal:\n\n def eat(self):\n print('吃')\n\n def bark(self):\n print('喝')\n\n def run(seft):\n print('跑')\n\n def sleep(self):\n print('睡')\n\n\nclass Dog(Animal):\n\n def bark(self):\n print('汪汪叫')\n\n\nclass XiaoTianQuan(Dog):\n\n def bark(self):\n print('像神一样的叫唤...')\n Dog.bark(self)\n print('$%^*%^#%$%')\n\n def fly(self):\n print('我会飞')\n\n\nxtq = XiaoTianQuan()\nxtq.bark()\n",
"step-5": "#! /usr/bin/python3\nclass Animal:\n \n def eat(self):\n print(\"吃\")\n def bark(self):\n print(\"喝\")\n\n def run(seft):\n print(\"跑\")\n\n def sleep(self):\n print(\"睡\")\n\n\nclass Dog(Animal):\n # 子类拥有父类的所有属性和方法\n def bark(self):\n print(\"汪汪叫\")\n\nclass XiaoTianQuan(Dog): # 3. 增加其他子类代码 \n def bark(self): \n # 1. 针对子类特有的需求, 编写代码\n print(\"像神一样的叫唤...\")\n # 2. 使用super(). 调用原来在父类中封装的方法\n # super().bark()\n # 注意: 如果使用子类调用方法, 会出现递归调用 - 死循环\n # 父类名.方法(self)\n Dog.bark(self)\n # 3. 增加其他子类代码\n print(\"$%^*%^#%$%\")\n def fly(self):\n print(\"我会飞\")\n\n\nxtq = XiaoTianQuan()\nxtq.bark()\n\n",
"step-ids": [
8,
9,
11,
12,
13
]
}
|
[
8,
9,
11,
12,
13
] |
# coding=utf-8
class Movie:
def __init__(self,movieid,moviename,score,poster):
self.movieid=movieid
self.moviename=moviename
self.score=score
self.poster=poster
for i in range(1,32):
print("<option value =\""+str(i)+"\">"+str(i)+"</option>")
|
normal
|
{
"blob_id": "856e62cf4cd443c7b3397e926f8fc4fece145f5b",
"index": 3447,
"step-1": "<mask token>\n",
"step-2": "class Movie:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Movie:\n\n def __init__(self, movieid, moviename, score, poster):\n self.movieid = movieid\n self.moviename = moviename\n self.score = score\n self.poster = poster\n\n\n<mask token>\n",
"step-4": "class Movie:\n\n def __init__(self, movieid, moviename, score, poster):\n self.movieid = movieid\n self.moviename = moviename\n self.score = score\n self.poster = poster\n\n\nfor i in range(1, 32):\n print('<option value =\"' + str(i) + '\">' + str(i) + '</option>')\n",
"step-5": "# coding=utf-8\r\nclass Movie:\r\n def __init__(self,movieid,moviename,score,poster):\r\n self.movieid=movieid\r\n self.moviename=moviename\r\n self.score=score\r\n self.poster=poster\r\n\r\nfor i in range(1,32):\r\n print(\"<option value =\\\"\"+str(i)+\"\\\">\"+str(i)+\"</option>\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
config.read('config.ini')
<|reserved_special_token_0|>
logging.getLogger('transformers.tokenization_utils').setLevel(logLevel +
oneLevelUp)
logging.getLogger('transformers.modeling_utils').setLevel(logLevel + oneLevelUp
)
logging.getLogger('transformers.configuration_utils').setLevel(logLevel +
oneLevelUp)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %H:%M%S', level=logLevel + oneLevelUp)
logger.setLevel(logLevel)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
config = configparser.ConfigParser()
config.read('config.ini')
settings = config['Settings']
colors = config['Colors']
<|reserved_special_token_0|>
logger = logging.getLogger(__name__)
logLevel = settings.getint('log-level')
oneLevelUp = 20
logging.getLogger('transformers.tokenization_utils').setLevel(logLevel +
oneLevelUp)
logging.getLogger('transformers.modeling_utils').setLevel(logLevel + oneLevelUp
)
logging.getLogger('transformers.configuration_utils').setLevel(logLevel +
oneLevelUp)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %H:%M%S', level=logLevel + oneLevelUp)
logger.setLevel(logLevel)
<|reserved_special_token_1|>
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
settings = config['Settings']
colors = config['Colors']
import logging
logger = logging.getLogger(__name__)
logLevel = settings.getint('log-level')
oneLevelUp = 20
logging.getLogger('transformers.tokenization_utils').setLevel(logLevel +
oneLevelUp)
logging.getLogger('transformers.modeling_utils').setLevel(logLevel + oneLevelUp
)
logging.getLogger('transformers.configuration_utils').setLevel(logLevel +
oneLevelUp)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %H:%M%S', level=logLevel + oneLevelUp)
logger.setLevel(logLevel)
<|reserved_special_token_1|>
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
settings=config['Settings']
colors=config['Colors']
import logging
logger = logging.getLogger(__name__)
logLevel = settings.getint('log-level')
oneLevelUp = 20
#I don't know if this will work before loading the transformers module?
#silence transformers outputs when loading model
logging.getLogger("transformers.tokenization_utils").setLevel(logLevel+oneLevelUp)
logging.getLogger("transformers.modeling_utils").setLevel(logLevel+oneLevelUp)
logging.getLogger("transformers.configuration_utils").setLevel(logLevel+oneLevelUp)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %H:%M%S',
level=logLevel+oneLevelUp
)
logger.setLevel(logLevel)
|
flexible
|
{
"blob_id": "e4fb932c476ca0222a077a43499bf9164e1f27d0",
"index": 8896,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nconfig.read('config.ini')\n<mask token>\nlogging.getLogger('transformers.tokenization_utils').setLevel(logLevel +\n oneLevelUp)\nlogging.getLogger('transformers.modeling_utils').setLevel(logLevel + oneLevelUp\n )\nlogging.getLogger('transformers.configuration_utils').setLevel(logLevel +\n oneLevelUp)\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M%S', level=logLevel + oneLevelUp)\nlogger.setLevel(logLevel)\n",
"step-3": "<mask token>\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nsettings = config['Settings']\ncolors = config['Colors']\n<mask token>\nlogger = logging.getLogger(__name__)\nlogLevel = settings.getint('log-level')\noneLevelUp = 20\nlogging.getLogger('transformers.tokenization_utils').setLevel(logLevel +\n oneLevelUp)\nlogging.getLogger('transformers.modeling_utils').setLevel(logLevel + oneLevelUp\n )\nlogging.getLogger('transformers.configuration_utils').setLevel(logLevel +\n oneLevelUp)\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M%S', level=logLevel + oneLevelUp)\nlogger.setLevel(logLevel)\n",
"step-4": "import configparser\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nsettings = config['Settings']\ncolors = config['Colors']\nimport logging\nlogger = logging.getLogger(__name__)\nlogLevel = settings.getint('log-level')\noneLevelUp = 20\nlogging.getLogger('transformers.tokenization_utils').setLevel(logLevel +\n oneLevelUp)\nlogging.getLogger('transformers.modeling_utils').setLevel(logLevel + oneLevelUp\n )\nlogging.getLogger('transformers.configuration_utils').setLevel(logLevel +\n oneLevelUp)\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M%S', level=logLevel + oneLevelUp)\nlogger.setLevel(logLevel)\n",
"step-5": "import configparser\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nsettings=config['Settings']\ncolors=config['Colors']\n\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogLevel = settings.getint('log-level')\noneLevelUp = 20\n\n#I don't know if this will work before loading the transformers module?\n#silence transformers outputs when loading model\nlogging.getLogger(\"transformers.tokenization_utils\").setLevel(logLevel+oneLevelUp)\nlogging.getLogger(\"transformers.modeling_utils\").setLevel(logLevel+oneLevelUp)\nlogging.getLogger(\"transformers.configuration_utils\").setLevel(logLevel+oneLevelUp)\n\nlogging.basicConfig(\n format='%(asctime)s - %(levelname)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M%S',\n level=logLevel+oneLevelUp\n)\nlogger.setLevel(logLevel)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# script :: creating a datamodel that fits mahout from ratings.dat
ratings_dat = open('../data/movielens-1m/users.dat', 'r')
ratings_csv = open('../data/movielens-1m/users.txt', 'w')
for line in ratings_dat:
arr = line.split('::')
new_line = '\t'.join(arr)
ratings_csv.write(new_line)
ratings_dat.close()
ratings_csv.close()
|
normal
|
{
"blob_id": "2dd59681a0dcb5d3f1143385100c09c7783babf4",
"index": 76,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in ratings_dat:\n arr = line.split('::')\n new_line = '\\t'.join(arr)\n ratings_csv.write(new_line)\nratings_dat.close()\nratings_csv.close()\n",
"step-3": "ratings_dat = open('../data/movielens-1m/users.dat', 'r')\nratings_csv = open('../data/movielens-1m/users.txt', 'w')\nfor line in ratings_dat:\n arr = line.split('::')\n new_line = '\\t'.join(arr)\n ratings_csv.write(new_line)\nratings_dat.close()\nratings_csv.close()\n",
"step-4": "#!/usr/bin/env python\n# script :: creating a datamodel that fits mahout from ratings.dat\n\n\n\nratings_dat = open('../data/movielens-1m/users.dat', 'r')\nratings_csv = open('../data/movielens-1m/users.txt', 'w')\n\nfor line in ratings_dat:\n\tarr = line.split('::')\n\tnew_line = '\\t'.join(arr)\n\n\tratings_csv.write(new_line)\n\nratings_dat.close()\nratings_csv.close()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class AuthenticationCustom(admin.ModelAdmin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AuthenticationCustom(admin.ModelAdmin):
list_display = 'email', 'id'
search_fields = ['email', 'mobile']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AuthenticationCustom(admin.ModelAdmin):
list_display = 'email', 'id'
search_fields = ['email', 'mobile']
admin.site.register(Account, AuthenticationCustom)
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Account
class AuthenticationCustom(admin.ModelAdmin):
list_display = 'email', 'id'
search_fields = ['email', 'mobile']
admin.site.register(Account, AuthenticationCustom)
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Account
# Register your models here.
class AuthenticationCustom(admin.ModelAdmin):
list_display = ("email", "id")
search_fields = ["email", "mobile"]
admin.site.register(Account, AuthenticationCustom)
|
flexible
|
{
"blob_id": "4957e62deec6192aabdf7144f02b28c7ce60ed4b",
"index": 4250,
"step-1": "<mask token>\n\n\nclass AuthenticationCustom(admin.ModelAdmin):\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AuthenticationCustom(admin.ModelAdmin):\n list_display = 'email', 'id'\n search_fields = ['email', 'mobile']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AuthenticationCustom(admin.ModelAdmin):\n list_display = 'email', 'id'\n search_fields = ['email', 'mobile']\n\n\nadmin.site.register(Account, AuthenticationCustom)\n",
"step-4": "from django.contrib import admin\nfrom .models import Account\n\n\nclass AuthenticationCustom(admin.ModelAdmin):\n list_display = 'email', 'id'\n search_fields = ['email', 'mobile']\n\n\nadmin.site.register(Account, AuthenticationCustom)\n",
"step-5": "from django.contrib import admin\nfrom .models import Account\n# Register your models here.\n\n\nclass AuthenticationCustom(admin.ModelAdmin):\n\tlist_display = (\"email\", \"id\")\n\n\tsearch_fields = [\"email\", \"mobile\"]\n\n\nadmin.site.register(Account, AuthenticationCustom)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
def helloWorld():
    """Print a demo greeting."""
    print("We are in DEMO land!")


def listBuilder():
    """Return the first five multiples of 10: [0, 10, 20, 30, 40]."""
    b = []
    for x in range(5):
        b.append(10 * x)
    return b


# Both functions are defined before any call: the original script invoked
# listBuilder() before its `def` statement had executed, which raised a
# NameError at runtime. print() calls (instead of py2 print statements)
# produce identical output and also run under Python 3.
for i in range(10):
    helloWorld()
print(listBuilder())
print("[done, for real]")
|
normal
|
{
"blob_id": "57516a17c1f3ee208076852369999d74dbb2b3ba",
"index": 98,
"step-1": "def helloWorld():\n print \"We are in DEMO land!\"\n\nfor i in range(10):\n helloWorld()\nprint listBuilder()\n\ndef listBuilder():\n b = []\n for x in range(5):\n b.append(10 * x)\n return b\n\nprint \"[done, for real]\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def get_load(pkt):
ack = str(pkt[TCP].ack)
seq = str(pkt[TCP].seq)
src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport)
dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport)
load = pkt[Raw].load
pkt_frag_loads = frag_remover(ack, load)
pkt_frag_loads[src_ip_port] = frag_joiner(ack, src_ip_port, load)
full_load = pkt_frag_loads[src_ip_port][ack]
return full_load
def frag_remover(ack, load):
"""
Keep the FILO OrderedDict of frag loads from getting too large
3 points of limit:
Number of ip_ports < 50
Number of acks per ip:port < 25
Number of chars in load < 5000
"""
global pkt_frag_loads
while len(pkt_frag_loads) > 50:
pkt_frag_loads.popitem(last=False)
copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
for ip_port in copy_pkt_frag_loads:
if len(copy_pkt_frag_loads[ip_port]) > 0:
while len(copy_pkt_frag_loads[ip_port]) > 25:
pkt_frag_loads[ip_port].popitem(last=False)
copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
for ip_port in copy_pkt_frag_loads:
for ack in copy_pkt_frag_loads[ip_port]:
if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:
pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][
-200:]
return pkt_frag_loads
def frag_joiner(ack, src_ip_port, load):
"""
Keep a store of previous fragments in an OrderedDict named pkt_frag_loads
"""
global pkt_frag_loads
for ip_port in pkt_frag_loads:
if src_ip_port == ip_port:
if ack in pkt_frag_loads[src_ip_port]:
old_load = pkt_frag_loads[src_ip_port][ack]
concat_load = old_load + load
return OrderedDict([(ack, concat_load)])
return OrderedDict([(ack, load)])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
<|reserved_special_token_0|>
def get_load(pkt):
ack = str(pkt[TCP].ack)
seq = str(pkt[TCP].seq)
src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport)
dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport)
load = pkt[Raw].load
pkt_frag_loads = frag_remover(ack, load)
pkt_frag_loads[src_ip_port] = frag_joiner(ack, src_ip_port, load)
full_load = pkt_frag_loads[src_ip_port][ack]
return full_load
def frag_remover(ack, load):
"""
Keep the FILO OrderedDict of frag loads from getting too large
3 points of limit:
Number of ip_ports < 50
Number of acks per ip:port < 25
Number of chars in load < 5000
"""
global pkt_frag_loads
while len(pkt_frag_loads) > 50:
pkt_frag_loads.popitem(last=False)
copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
for ip_port in copy_pkt_frag_loads:
if len(copy_pkt_frag_loads[ip_port]) > 0:
while len(copy_pkt_frag_loads[ip_port]) > 25:
pkt_frag_loads[ip_port].popitem(last=False)
copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
for ip_port in copy_pkt_frag_loads:
for ack in copy_pkt_frag_loads[ip_port]:
if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:
pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][
-200:]
return pkt_frag_loads
def frag_joiner(ack, src_ip_port, load):
"""
Keep a store of previous fragments in an OrderedDict named pkt_frag_loads
"""
global pkt_frag_loads
for ip_port in pkt_frag_loads:
if src_ip_port == ip_port:
if ack in pkt_frag_loads[src_ip_port]:
old_load = pkt_frag_loads[src_ip_port][ack]
concat_load = old_load + load
return OrderedDict([(ack, concat_load)])
return OrderedDict([(ack, load)])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
<|reserved_special_token_0|>
conf.verb = 0
<|reserved_special_token_0|>
pkt_frag_loads = OrderedDict()
def get_load(pkt):
ack = str(pkt[TCP].ack)
seq = str(pkt[TCP].seq)
src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport)
dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport)
load = pkt[Raw].load
pkt_frag_loads = frag_remover(ack, load)
pkt_frag_loads[src_ip_port] = frag_joiner(ack, src_ip_port, load)
full_load = pkt_frag_loads[src_ip_port][ack]
return full_load
def frag_remover(ack, load):
"""
Keep the FILO OrderedDict of frag loads from getting too large
3 points of limit:
Number of ip_ports < 50
Number of acks per ip:port < 25
Number of chars in load < 5000
"""
global pkt_frag_loads
while len(pkt_frag_loads) > 50:
pkt_frag_loads.popitem(last=False)
copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
for ip_port in copy_pkt_frag_loads:
if len(copy_pkt_frag_loads[ip_port]) > 0:
while len(copy_pkt_frag_loads[ip_port]) > 25:
pkt_frag_loads[ip_port].popitem(last=False)
copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
for ip_port in copy_pkt_frag_loads:
for ack in copy_pkt_frag_loads[ip_port]:
if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:
pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][
-200:]
return pkt_frag_loads
def frag_joiner(ack, src_ip_port, load):
"""
Keep a store of previous fragments in an OrderedDict named pkt_frag_loads
"""
global pkt_frag_loads
for ip_port in pkt_frag_loads:
if src_ip_port == ip_port:
if ack in pkt_frag_loads[src_ip_port]:
old_load = pkt_frag_loads[src_ip_port][ack]
concat_load = old_load + load
return OrderedDict([(ack, concat_load)])
return OrderedDict([(ack, load)])
<|reserved_special_token_1|>
import logging
logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
from scapy.all import *
conf.verb = 0
from collections import OrderedDict
pkt_frag_loads = OrderedDict()
def get_load(pkt):
ack = str(pkt[TCP].ack)
seq = str(pkt[TCP].seq)
src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport)
dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport)
load = pkt[Raw].load
pkt_frag_loads = frag_remover(ack, load)
pkt_frag_loads[src_ip_port] = frag_joiner(ack, src_ip_port, load)
full_load = pkt_frag_loads[src_ip_port][ack]
return full_load
def frag_remover(ack, load):
"""
Keep the FILO OrderedDict of frag loads from getting too large
3 points of limit:
Number of ip_ports < 50
Number of acks per ip:port < 25
Number of chars in load < 5000
"""
global pkt_frag_loads
while len(pkt_frag_loads) > 50:
pkt_frag_loads.popitem(last=False)
copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
for ip_port in copy_pkt_frag_loads:
if len(copy_pkt_frag_loads[ip_port]) > 0:
while len(copy_pkt_frag_loads[ip_port]) > 25:
pkt_frag_loads[ip_port].popitem(last=False)
copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
for ip_port in copy_pkt_frag_loads:
for ack in copy_pkt_frag_loads[ip_port]:
if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:
pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][
-200:]
return pkt_frag_loads
def frag_joiner(ack, src_ip_port, load):
"""
Keep a store of previous fragments in an OrderedDict named pkt_frag_loads
"""
global pkt_frag_loads
for ip_port in pkt_frag_loads:
if src_ip_port == ip_port:
if ack in pkt_frag_loads[src_ip_port]:
old_load = pkt_frag_loads[src_ip_port][ack]
concat_load = old_load + load
return OrderedDict([(ack, concat_load)])
return OrderedDict([(ack, load)])
<|reserved_special_token_1|>
# dealing with the packet fragments and their reconstruction
import logging
# shut up scapy
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
conf.verb=0
from collections import OrderedDict
pkt_frag_loads = OrderedDict()
def get_load(pkt):
    """Return the full reassembled TCP payload for *pkt*.

    Joins this packet's Raw load with any previously stored fragments
    sharing the same source ip:port and ACK number, after pruning the
    fragment store via frag_remover().
    """
    ack = str(pkt[TCP].ack)
    seq = str(pkt[TCP].seq)  # computed but unused below
    src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport)
    dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport)  # unused below
    # Create the full load from the stored load fragments.
    load = pkt[Raw].load
    # frag_remover returns the module-level store itself, so the mutation on
    # the next line also updates the global pkt_frag_loads dict.
    pkt_frag_loads = frag_remover(ack, load)
    pkt_frag_loads[src_ip_port] = frag_joiner(ack, src_ip_port, load)
    full_load = pkt_frag_loads[src_ip_port][ack]
    return full_load
def frag_remover(ack, load):
    '''
    Keep the OrderedDict of frag loads from getting too large.

    Entries are evicted oldest-first (popitem(last=False)) under three
    limits:
        Number of ip_ports < 50
        Number of acks per ip:port < 25
        Number of chars per load < 5000 (trimmed to the last 200)

    Returns the module-level pkt_frag_loads store.

    NOTE(review): the ack and load parameters are unused here; the function
    operates solely on the global store.
    '''
    global pkt_frag_loads
    # Keep the number of IP:port mappings below 50
    # last=False pops the oldest item rather than the latest
    while len(pkt_frag_loads) > 50:
        pkt_frag_loads.popitem(last=False)
    # Loop through a deep copy dict but modify the original dict.
    # NOTE(review): `copy` is never imported explicitly in this file;
    # presumably it arrives via scapy.all's star import -- TODO confirm.
    copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
    for ip_port in copy_pkt_frag_loads:
        if len(copy_pkt_frag_loads[ip_port]) > 0:
            # Keep 25 ack:load's per ip:port
            while len(copy_pkt_frag_loads[ip_port]) > 25:
                pkt_frag_loads[ip_port].popitem(last=False)
    # Recopy the new dict to prevent KeyErrors for modifying dict in loop
    copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
    for ip_port in copy_pkt_frag_loads:
        # Cap each stored load at 5000 chars
        for ack in copy_pkt_frag_loads[ip_port]:
            # If load > 5000 chars, just keep the last 200 chars
            if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:
                pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][-200:]
    return pkt_frag_loads
def frag_joiner(ack, src_ip_port, load):
    """Return an OrderedDict mapping *ack* to the joined payload.

    If the global pkt_frag_loads store already holds a fragment for this
    src_ip_port/ack pair, the new load is appended to the stored one;
    otherwise the load is returned on its own.
    """
    global pkt_frag_loads

    for known_ip_port in pkt_frag_loads:
        if known_ip_port != src_ip_port:
            continue
        stored = pkt_frag_loads[src_ip_port]
        if ack in stored:
            # Extend the previously stored payload with the new fragment.
            joined = stored[ack] + load
            return OrderedDict([(ack, joined)])

    return OrderedDict([(ack, load)])
|
flexible
|
{
"blob_id": "3e0bc91b81d0f503b78c9ac685b05b7ecb754e28",
"index": 3460,
"step-1": "<mask token>\n\n\ndef get_load(pkt):\n ack = str(pkt[TCP].ack)\n seq = str(pkt[TCP].seq)\n src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport)\n dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport)\n load = pkt[Raw].load\n pkt_frag_loads = frag_remover(ack, load)\n pkt_frag_loads[src_ip_port] = frag_joiner(ack, src_ip_port, load)\n full_load = pkt_frag_loads[src_ip_port][ack]\n return full_load\n\n\ndef frag_remover(ack, load):\n \"\"\"\n Keep the FILO OrderedDict of frag loads from getting too large\n 3 points of limit:\n Number of ip_ports < 50\n Number of acks per ip:port < 25\n Number of chars in load < 5000\n \"\"\"\n global pkt_frag_loads\n while len(pkt_frag_loads) > 50:\n pkt_frag_loads.popitem(last=False)\n copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)\n for ip_port in copy_pkt_frag_loads:\n if len(copy_pkt_frag_loads[ip_port]) > 0:\n while len(copy_pkt_frag_loads[ip_port]) > 25:\n pkt_frag_loads[ip_port].popitem(last=False)\n copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)\n for ip_port in copy_pkt_frag_loads:\n for ack in copy_pkt_frag_loads[ip_port]:\n if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:\n pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][\n -200:]\n return pkt_frag_loads\n\n\ndef frag_joiner(ack, src_ip_port, load):\n \"\"\"\n Keep a store of previous fragments in an OrderedDict named pkt_frag_loads\n \"\"\"\n global pkt_frag_loads\n for ip_port in pkt_frag_loads:\n if src_ip_port == ip_port:\n if ack in pkt_frag_loads[src_ip_port]:\n old_load = pkt_frag_loads[src_ip_port][ack]\n concat_load = old_load + load\n return OrderedDict([(ack, concat_load)])\n return OrderedDict([(ack, load)])\n",
"step-2": "<mask token>\nlogging.getLogger('scapy.runtime').setLevel(logging.ERROR)\n<mask token>\n\n\ndef get_load(pkt):\n ack = str(pkt[TCP].ack)\n seq = str(pkt[TCP].seq)\n src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport)\n dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport)\n load = pkt[Raw].load\n pkt_frag_loads = frag_remover(ack, load)\n pkt_frag_loads[src_ip_port] = frag_joiner(ack, src_ip_port, load)\n full_load = pkt_frag_loads[src_ip_port][ack]\n return full_load\n\n\ndef frag_remover(ack, load):\n \"\"\"\n Keep the FILO OrderedDict of frag loads from getting too large\n 3 points of limit:\n Number of ip_ports < 50\n Number of acks per ip:port < 25\n Number of chars in load < 5000\n \"\"\"\n global pkt_frag_loads\n while len(pkt_frag_loads) > 50:\n pkt_frag_loads.popitem(last=False)\n copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)\n for ip_port in copy_pkt_frag_loads:\n if len(copy_pkt_frag_loads[ip_port]) > 0:\n while len(copy_pkt_frag_loads[ip_port]) > 25:\n pkt_frag_loads[ip_port].popitem(last=False)\n copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)\n for ip_port in copy_pkt_frag_loads:\n for ack in copy_pkt_frag_loads[ip_port]:\n if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:\n pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][\n -200:]\n return pkt_frag_loads\n\n\ndef frag_joiner(ack, src_ip_port, load):\n \"\"\"\n Keep a store of previous fragments in an OrderedDict named pkt_frag_loads\n \"\"\"\n global pkt_frag_loads\n for ip_port in pkt_frag_loads:\n if src_ip_port == ip_port:\n if ack in pkt_frag_loads[src_ip_port]:\n old_load = pkt_frag_loads[src_ip_port][ack]\n concat_load = old_load + load\n return OrderedDict([(ack, concat_load)])\n return OrderedDict([(ack, load)])\n",
"step-3": "<mask token>\nlogging.getLogger('scapy.runtime').setLevel(logging.ERROR)\n<mask token>\nconf.verb = 0\n<mask token>\npkt_frag_loads = OrderedDict()\n\n\ndef get_load(pkt):\n ack = str(pkt[TCP].ack)\n seq = str(pkt[TCP].seq)\n src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport)\n dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport)\n load = pkt[Raw].load\n pkt_frag_loads = frag_remover(ack, load)\n pkt_frag_loads[src_ip_port] = frag_joiner(ack, src_ip_port, load)\n full_load = pkt_frag_loads[src_ip_port][ack]\n return full_load\n\n\ndef frag_remover(ack, load):\n \"\"\"\n Keep the FILO OrderedDict of frag loads from getting too large\n 3 points of limit:\n Number of ip_ports < 50\n Number of acks per ip:port < 25\n Number of chars in load < 5000\n \"\"\"\n global pkt_frag_loads\n while len(pkt_frag_loads) > 50:\n pkt_frag_loads.popitem(last=False)\n copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)\n for ip_port in copy_pkt_frag_loads:\n if len(copy_pkt_frag_loads[ip_port]) > 0:\n while len(copy_pkt_frag_loads[ip_port]) > 25:\n pkt_frag_loads[ip_port].popitem(last=False)\n copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)\n for ip_port in copy_pkt_frag_loads:\n for ack in copy_pkt_frag_loads[ip_port]:\n if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:\n pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][\n -200:]\n return pkt_frag_loads\n\n\ndef frag_joiner(ack, src_ip_port, load):\n \"\"\"\n Keep a store of previous fragments in an OrderedDict named pkt_frag_loads\n \"\"\"\n global pkt_frag_loads\n for ip_port in pkt_frag_loads:\n if src_ip_port == ip_port:\n if ack in pkt_frag_loads[src_ip_port]:\n old_load = pkt_frag_loads[src_ip_port][ack]\n concat_load = old_load + load\n return OrderedDict([(ack, concat_load)])\n return OrderedDict([(ack, load)])\n",
"step-4": "import logging\nlogging.getLogger('scapy.runtime').setLevel(logging.ERROR)\nfrom scapy.all import *\nconf.verb = 0\nfrom collections import OrderedDict\npkt_frag_loads = OrderedDict()\n\n\ndef get_load(pkt):\n ack = str(pkt[TCP].ack)\n seq = str(pkt[TCP].seq)\n src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport)\n dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport)\n load = pkt[Raw].load\n pkt_frag_loads = frag_remover(ack, load)\n pkt_frag_loads[src_ip_port] = frag_joiner(ack, src_ip_port, load)\n full_load = pkt_frag_loads[src_ip_port][ack]\n return full_load\n\n\ndef frag_remover(ack, load):\n \"\"\"\n Keep the FILO OrderedDict of frag loads from getting too large\n 3 points of limit:\n Number of ip_ports < 50\n Number of acks per ip:port < 25\n Number of chars in load < 5000\n \"\"\"\n global pkt_frag_loads\n while len(pkt_frag_loads) > 50:\n pkt_frag_loads.popitem(last=False)\n copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)\n for ip_port in copy_pkt_frag_loads:\n if len(copy_pkt_frag_loads[ip_port]) > 0:\n while len(copy_pkt_frag_loads[ip_port]) > 25:\n pkt_frag_loads[ip_port].popitem(last=False)\n copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)\n for ip_port in copy_pkt_frag_loads:\n for ack in copy_pkt_frag_loads[ip_port]:\n if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:\n pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][\n -200:]\n return pkt_frag_loads\n\n\ndef frag_joiner(ack, src_ip_port, load):\n \"\"\"\n Keep a store of previous fragments in an OrderedDict named pkt_frag_loads\n \"\"\"\n global pkt_frag_loads\n for ip_port in pkt_frag_loads:\n if src_ip_port == ip_port:\n if ack in pkt_frag_loads[src_ip_port]:\n old_load = pkt_frag_loads[src_ip_port][ack]\n concat_load = old_load + load\n return OrderedDict([(ack, concat_load)])\n return OrderedDict([(ack, load)])\n",
"step-5": "# dealing with the packet fragments and their reconsttruction \n\nimport logging\n# shut up scapy\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.ERROR)\nfrom scapy.all import *\nconf.verb=0\nfrom collections import OrderedDict\n\n\npkt_frag_loads = OrderedDict()\n\ndef get_load(pkt):\n ack = str(pkt[TCP].ack)\n seq = str(pkt[TCP].seq)\n src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport)\n dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport)\n\n #create full load from load fragments\t\n load = pkt[Raw].load\n pkt_frag_loads = frag_remover(ack, load)\n pkt_frag_loads[src_ip_port] = frag_joiner(ack, src_ip_port, load)\n full_load = pkt_frag_loads[src_ip_port][ack]\n\n return full_load\n\ndef frag_remover(ack, load):\n '''\n Keep the FILO OrderedDict of frag loads from getting too large\n 3 points of limit:\n Number of ip_ports < 50\n Number of acks per ip:port < 25\n Number of chars in load < 5000\n '''\n\n global pkt_frag_loads\n\n # Keep the number of IP:port mappings below 50\n # last=False pops the oldest item rather than the latest\n while len(pkt_frag_loads) > 50:\n pkt_frag_loads.popitem(last=False)\n\n # Loop through a deep copy dict but modify the original dict\n copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)\n for ip_port in copy_pkt_frag_loads:\n if len(copy_pkt_frag_loads[ip_port]) > 0:\n # Keep 25 ack:load's per ip:port\n while len(copy_pkt_frag_loads[ip_port]) > 25:\n pkt_frag_loads[ip_port].popitem(last=False)\n\n # Recopy the new dict to prevent KeyErrors for modifying dict in loop\n copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)\n for ip_port in copy_pkt_frag_loads:\n # Keep the load less than 75,000 chars\n for ack in copy_pkt_frag_loads[ip_port]:\n # If load > 5000 chars, just keep the last 200 chars\n if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:\n pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][-200:]\n return pkt_frag_loads\n\ndef frag_joiner(ack, src_ip_port, load):\n '''\n Keep a store 
of previous fragments in an OrderedDict named pkt_frag_loads\n '''\n\n global pkt_frag_loads\n\n for ip_port in pkt_frag_loads:\n if src_ip_port == ip_port:\n if ack in pkt_frag_loads[src_ip_port]:\n # Make pkt_frag_loads[src_ip_port][ack] = full load\n old_load = pkt_frag_loads[src_ip_port][ack]\n concat_load = old_load + load\n return OrderedDict([(ack, concat_load)])\n\n return OrderedDict([(ack, load)])\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from django.contrib.auth import get_user_model
from django.test import TestCase
from .models import Order
from markets.models import Market
from tickers.models import Ticker
from trades.models import Trade
USER_MODEL = get_user_model()
class Matching:
    """Order-matching helpers: best bid/ask lookup and order crossing."""

    @staticmethod
    def get_bid_ask( market : Market):
        """Return (best_bid_price, best_ask_price) for *market*.

        Only open orders (NEW / UPDATED / PARTIALLY FILLED) with a
        non-zero price are considered; either element may be None when
        that side of the book is empty.
        """
        bid = market.order_set.filter(side=Order.SIDES_BUY, status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by("-price")
        ask = market.order_set.filter(side=Order.SIDES_SELL, status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by("price")
        bid_price = None
        ask_price = None
        if len(bid) > 0:
            bid_price = bid[0].price
        if len(ask) > 0:
            ask_price = ask[0].price
        return bid_price, ask_price

    @staticmethod
    def take(order: Order):
        """Cross *order* against the opposite side of the book.

        Walks resting open orders in price priority, filling both sides
        and creating one Trade per fill at the resting order's price,
        until *order* is filled or no resting order satisfies its limit
        price. An order priced 0 never triggers the limit break, so it
        matches at any price.
        """
        depth = []
        if order.side == Order.SIDES_SELL:
            # A sell takes from the buy side, highest price first.
            depth = order.market.order_set.filter(side=Order.SIDES_BUY, status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by("-price")
        if order.side == Order.SIDES_BUY:
            # A buy takes from the sell side, lowest price first.
            depth = order.market.order_set.filter(side=Order.SIDES_SELL, status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by("price")
        for o in depth:
            if (order.side == Order.SIDES_SELL and order.price != 0 and order.price > o.price) or (order.side == Order.SIDES_BUY and order.price != 0 and order.price < o.price):
                # Limit price no longer satisfied; stop walking the book.
                break
            # Fill size is the smaller of the two remaining quantities.
            if order.size - order.filled > o.size - o.filled:
                fill_size = o.size - o.filled
            else:
                fill_size = order.size - order.filled
            o.fill( fill_size )
            order.fill( fill_size )
            o.save()
            order.save()
            if order.side == Order.SIDES_SELL:
                order_buy = o
                order_sell = order
            else:
                order_buy = order
                order_sell = o
            # Creating trade object at the resting order's price
            Trade.objects.create(order_buy=order_buy, order_sell=order_sell, price=o.price,side=order.side)
            if order.status == Order.STATUS_FILLED:
                break

    @staticmethod
    def process_order(self, order: Order ):
        # NOTE(review): a @staticmethod should not take `self`; as written,
        # callers must pass a dummy first argument. Confirm call sites
        # before changing the signature.
        if order.status == Order.STATUS_WAITING_NEW:
            order.status = Order.STATUS_NEW
            order.save()
        # best_bid_price, best_ask_price = self.get_bid_ask(order.market)
        if order.price == 0:
            # Market order: take liquidity, then mark fully processed.
            Matching.take(order)
            order.status = Order.STATUS_FILLED
            order.save()
        if order.price != 0:
            Matching.take(order)
class TestOrder(TestCase):
    """Seeds a USD/EUR market with orders from two users and inspects the book."""

    def setUp(self) -> None:
        # One market built from two tickers.
        self.ticker1 = Ticker.objects.create(name="USD")
        self.ticker2 = Ticker.objects.create(name="EUR")
        self.market = Market.objects.create(name="USD/EUR",
                                            base_currency=self.ticker1,
                                            quote_currency=self.ticker2)
        self.Alice = USER_MODEL.objects.create_user(username="Alice", email="alice@yy.ru", password="hhhh")
        self.Bob = USER_MODEL.objects.create_user(username="Bob", email="bob@yy.ru", password="hhhh")
        # Creating Alice sell orders: 20 limit sells priced upward from 10000
        for i in range(20):
            Order.objects.create(sender=self.Alice,
                                 side=Order.SIDES_SELL,
                                 price=10000 + i*100,
                                 size=100000+i*10000,
                                 filled=0,
                                 status=Order.STATUS_WAITING_NEW,
                                 hash_signature="SIGA",
                                 market=self.market)
        # Creating Alice buy orders: 20 limit buys priced downward from 10000
        for i in range(20):
            Order.objects.create(sender=self.Alice,
                                 side=Order.SIDES_BUY,
                                 price=10000 - i*100,
                                 size=100000+i*10000,
                                 filled=0,
                                 status=Order.STATUS_WAITING_NEW,
                                 hash_signature="SIGA",
                                 market=self.market)
        # Creating Bob orders: two zero-priced (market) buys
        for i in range(2):
            Order.objects.create(sender=self.Bob,
                                 side=Order.SIDES_BUY,
                                 price=0,
                                 size=100000+i*10000,
                                 filled=0,
                                 status=Order.STATUS_WAITING_NEW,
                                 hash_signature="SIGA",
                                 market=self.market)

    def test_market_exists(self):
        """Exactly one market was created by setUp."""
        assert Market.objects.all().count() == 1

    def test_orders_created(self):
        """Print both sides of the book in price priority (visual check only)."""
        #assert Market.objects.all()[0].order_set.count() == 40
        print("---BID----")
        for order in Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY).exclude(price=0).order_by("-price", "created_at"):
            print(order.price, order.size)
        print("---ASK----")
        for order in Market.objects.all()[0].order_set.filter(side=Order.SIDES_SELL).exclude(price=0).order_by("price", "created_at"):
            print(order.price, order.size)

    def test_get_level_1(self):
        """Compute and print the best bid/ask among WAITING_NEW orders."""
        bid = Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY, status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by("-price")
        ask = Market.objects.all()[0].order_set.filter(side=Order.SIDES_SELL, status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by("price")
        bid_price = None
        ask_price = None
        if len(bid) > 0:
            bid_price = bid[0].price
        if len(ask) > 0:
            ask_price = ask[0].price
        print(f'Bid {bid_price} Ask {ask_price}')
|
normal
|
{
"blob_id": "866ee2c4fa52bf9bda4730c7a9d46bb4798adcd4",
"index": 1775,
"step-1": "<mask token>\n\n\nclass Matching:\n <mask token>\n <mask token>\n\n @staticmethod\n def process_order(self, order: Order):\n if order.status == Order.STATUS_WAITING_NEW:\n order.status = Order.STATUS_NEW\n order.save()\n if order.price == 0:\n Matching.take(order)\n order.status = Order.STATUS_FILLED\n order.save()\n if order.price != 0:\n Matching.take(order)\n\n\nclass TestOrder(TestCase):\n\n def setUp(self) ->None:\n self.ticker1 = Ticker.objects.create(name='USD')\n self.ticker2 = Ticker.objects.create(name='EUR')\n self.market = Market.objects.create(name='USD/EUR', base_currency=\n self.ticker1, quote_currency=self.ticker2)\n self.Alice = USER_MODEL.objects.create_user(username='Alice', email\n ='alice@yy.ru', password='hhhh')\n self.Bob = USER_MODEL.objects.create_user(username='Bob', email=\n 'bob@yy.ru', password='hhhh')\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_SELL,\n price=10000 + i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_BUY,\n price=10000 - i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(2):\n Order.objects.create(sender=self.Bob, side=Order.SIDES_BUY,\n price=0, size=100000 + i * 10000, filled=0, status=Order.\n STATUS_WAITING_NEW, hash_signature='SIGA', market=self.market)\n\n def test_market_exists(self):\n assert Market.objects.all().count() == 1\n\n def test_orders_created(self):\n print('---BID----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_BUY).exclude(price=0).order_by('-price', 'created_at'):\n print(order.price, order.size)\n print('---ASK----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL).exclude(price=0).order_by('price', 'created_at'):\n print(order.price, 
order.size)\n\n def test_get_level_1(self):\n bid = Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY,\n status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by(\n '-price')\n ask = Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL, status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0\n ).order_by('price')\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n print(f'Bid {bid_price} Ask {ask_price}')\n",
"step-2": "<mask token>\n\n\nclass Matching:\n\n @staticmethod\n def get_bid_ask(market: Market):\n bid = market.order_set.filter(side=Order.SIDES_BUY, status__in=[\n Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('-price')\n ask = market.order_set.filter(side=Order.SIDES_SELL, status__in=[\n Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('price')\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n return bid_price, ask_price\n <mask token>\n\n @staticmethod\n def process_order(self, order: Order):\n if order.status == Order.STATUS_WAITING_NEW:\n order.status = Order.STATUS_NEW\n order.save()\n if order.price == 0:\n Matching.take(order)\n order.status = Order.STATUS_FILLED\n order.save()\n if order.price != 0:\n Matching.take(order)\n\n\nclass TestOrder(TestCase):\n\n def setUp(self) ->None:\n self.ticker1 = Ticker.objects.create(name='USD')\n self.ticker2 = Ticker.objects.create(name='EUR')\n self.market = Market.objects.create(name='USD/EUR', base_currency=\n self.ticker1, quote_currency=self.ticker2)\n self.Alice = USER_MODEL.objects.create_user(username='Alice', email\n ='alice@yy.ru', password='hhhh')\n self.Bob = USER_MODEL.objects.create_user(username='Bob', email=\n 'bob@yy.ru', password='hhhh')\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_SELL,\n price=10000 + i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_BUY,\n price=10000 - i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(2):\n Order.objects.create(sender=self.Bob, side=Order.SIDES_BUY,\n price=0, size=100000 + i * 10000, filled=0, 
status=Order.\n STATUS_WAITING_NEW, hash_signature='SIGA', market=self.market)\n\n def test_market_exists(self):\n assert Market.objects.all().count() == 1\n\n def test_orders_created(self):\n print('---BID----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_BUY).exclude(price=0).order_by('-price', 'created_at'):\n print(order.price, order.size)\n print('---ASK----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL).exclude(price=0).order_by('price', 'created_at'):\n print(order.price, order.size)\n\n def test_get_level_1(self):\n bid = Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY,\n status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by(\n '-price')\n ask = Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL, status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0\n ).order_by('price')\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n print(f'Bid {bid_price} Ask {ask_price}')\n",
"step-3": "<mask token>\n\n\nclass Matching:\n\n @staticmethod\n def get_bid_ask(market: Market):\n bid = market.order_set.filter(side=Order.SIDES_BUY, status__in=[\n Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('-price')\n ask = market.order_set.filter(side=Order.SIDES_SELL, status__in=[\n Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('price')\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n return bid_price, ask_price\n\n @staticmethod\n def take(order: Order):\n depth = []\n if order.side == Order.SIDES_SELL:\n depth = order.market.order_set.filter(side=Order.SIDES_BUY,\n status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('-price')\n if order.side == Order.SIDES_BUY:\n depth = order.market.order_set.filter(side=Order.SIDES_SELL,\n status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('price')\n for o in depth:\n if (order.side == Order.SIDES_SELL and order.price != 0 and \n order.price > o.price or order.side == Order.SIDES_BUY and \n order.price != 0 and order.price < o.price):\n break\n if order.size - order.filled > o.size - o.filled:\n fill_size = o.size - o.filled\n else:\n fill_size = order.size - order.filled\n o.fill(fill_size)\n order.fill(fill_size)\n o.save()\n order.save()\n if order.side == Order.SIDES_SELL:\n order_buy = o\n order_sell = order\n else:\n order_buy = order\n order_sell = o\n Trade.objects.create(order_buy=order_buy, order_sell=order_sell,\n price=o.price, side=order.side)\n if order.status == Order.STATUS_FILLED:\n break\n\n @staticmethod\n def process_order(self, order: Order):\n if order.status == Order.STATUS_WAITING_NEW:\n order.status = Order.STATUS_NEW\n order.save()\n if order.price == 0:\n Matching.take(order)\n 
order.status = Order.STATUS_FILLED\n order.save()\n if order.price != 0:\n Matching.take(order)\n\n\nclass TestOrder(TestCase):\n\n def setUp(self) ->None:\n self.ticker1 = Ticker.objects.create(name='USD')\n self.ticker2 = Ticker.objects.create(name='EUR')\n self.market = Market.objects.create(name='USD/EUR', base_currency=\n self.ticker1, quote_currency=self.ticker2)\n self.Alice = USER_MODEL.objects.create_user(username='Alice', email\n ='alice@yy.ru', password='hhhh')\n self.Bob = USER_MODEL.objects.create_user(username='Bob', email=\n 'bob@yy.ru', password='hhhh')\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_SELL,\n price=10000 + i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_BUY,\n price=10000 - i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(2):\n Order.objects.create(sender=self.Bob, side=Order.SIDES_BUY,\n price=0, size=100000 + i * 10000, filled=0, status=Order.\n STATUS_WAITING_NEW, hash_signature='SIGA', market=self.market)\n\n def test_market_exists(self):\n assert Market.objects.all().count() == 1\n\n def test_orders_created(self):\n print('---BID----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_BUY).exclude(price=0).order_by('-price', 'created_at'):\n print(order.price, order.size)\n print('---ASK----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL).exclude(price=0).order_by('price', 'created_at'):\n print(order.price, order.size)\n\n def test_get_level_1(self):\n bid = Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY,\n status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by(\n '-price')\n ask = Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL, 
status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0\n ).order_by('price')\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n print(f'Bid {bid_price} Ask {ask_price}')\n",
"step-4": "from django.contrib.auth import get_user_model\nfrom django.test import TestCase\nfrom .models import Order\nfrom markets.models import Market\nfrom tickers.models import Ticker\nfrom trades.models import Trade\nUSER_MODEL = get_user_model()\n\n\nclass Matching:\n\n @staticmethod\n def get_bid_ask(market: Market):\n bid = market.order_set.filter(side=Order.SIDES_BUY, status__in=[\n Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('-price')\n ask = market.order_set.filter(side=Order.SIDES_SELL, status__in=[\n Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('price')\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n return bid_price, ask_price\n\n @staticmethod\n def take(order: Order):\n depth = []\n if order.side == Order.SIDES_SELL:\n depth = order.market.order_set.filter(side=Order.SIDES_BUY,\n status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('-price')\n if order.side == Order.SIDES_BUY:\n depth = order.market.order_set.filter(side=Order.SIDES_SELL,\n status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('price')\n for o in depth:\n if (order.side == Order.SIDES_SELL and order.price != 0 and \n order.price > o.price or order.side == Order.SIDES_BUY and \n order.price != 0 and order.price < o.price):\n break\n if order.size - order.filled > o.size - o.filled:\n fill_size = o.size - o.filled\n else:\n fill_size = order.size - order.filled\n o.fill(fill_size)\n order.fill(fill_size)\n o.save()\n order.save()\n if order.side == Order.SIDES_SELL:\n order_buy = o\n order_sell = order\n else:\n order_buy = order\n order_sell = o\n Trade.objects.create(order_buy=order_buy, order_sell=order_sell,\n price=o.price, side=order.side)\n if order.status == 
Order.STATUS_FILLED:\n break\n\n @staticmethod\n def process_order(self, order: Order):\n if order.status == Order.STATUS_WAITING_NEW:\n order.status = Order.STATUS_NEW\n order.save()\n if order.price == 0:\n Matching.take(order)\n order.status = Order.STATUS_FILLED\n order.save()\n if order.price != 0:\n Matching.take(order)\n\n\nclass TestOrder(TestCase):\n\n def setUp(self) ->None:\n self.ticker1 = Ticker.objects.create(name='USD')\n self.ticker2 = Ticker.objects.create(name='EUR')\n self.market = Market.objects.create(name='USD/EUR', base_currency=\n self.ticker1, quote_currency=self.ticker2)\n self.Alice = USER_MODEL.objects.create_user(username='Alice', email\n ='alice@yy.ru', password='hhhh')\n self.Bob = USER_MODEL.objects.create_user(username='Bob', email=\n 'bob@yy.ru', password='hhhh')\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_SELL,\n price=10000 + i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_BUY,\n price=10000 - i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(2):\n Order.objects.create(sender=self.Bob, side=Order.SIDES_BUY,\n price=0, size=100000 + i * 10000, filled=0, status=Order.\n STATUS_WAITING_NEW, hash_signature='SIGA', market=self.market)\n\n def test_market_exists(self):\n assert Market.objects.all().count() == 1\n\n def test_orders_created(self):\n print('---BID----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_BUY).exclude(price=0).order_by('-price', 'created_at'):\n print(order.price, order.size)\n print('---ASK----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL).exclude(price=0).order_by('price', 'created_at'):\n print(order.price, order.size)\n\n def test_get_level_1(self):\n bid = 
Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY,\n status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by(\n '-price')\n ask = Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL, status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0\n ).order_by('price')\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n print(f'Bid {bid_price} Ask {ask_price}')\n",
"step-5": "from django.contrib.auth import get_user_model\nfrom django.test import TestCase\n\nfrom .models import Order\nfrom markets.models import Market\nfrom tickers.models import Ticker\nfrom trades.models import Trade\n\n\nUSER_MODEL = get_user_model()\n\n\nclass Matching:\n @staticmethod\n def get_bid_ask( market : Market):\n bid = market.order_set.filter(side=Order.SIDES_BUY, status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by(\"-price\")\n ask = market.order_set.filter(side=Order.SIDES_SELL, status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by(\"price\")\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n\n return bid_price, ask_price\n\n @staticmethod\n def take(order: Order):\n depth = []\n if order.side == Order.SIDES_SELL:\n depth = order.market.order_set.filter(side=Order.SIDES_BUY, status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by(\"-price\")\n if order.side == Order.SIDES_BUY:\n depth = order.market.order_set.filter(side=Order.SIDES_SELL, status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by(\"price\")\n for o in depth:\n if (order.side == Order.SIDES_SELL and order.price != 0 and order.price > o.price) or (order.side == Order.SIDES_BUY and order.price != 0 and order.price < o.price):\n break\n if order.size - order.filled > o.size - o.filled:\n fill_size = o.size - o.filled\n else:\n fill_size = order.size - order.filled\n o.fill( fill_size )\n order.fill( fill_size )\n o.save()\n order.save()\n if order.side == Order.SIDES_SELL:\n order_buy = o\n order_sell = order\n else:\n order_buy = order\n order_sell = o\n\n # Creating trade object\n Trade.objects.create(order_buy=order_buy, order_sell=order_sell, price=o.price,side=order.side)\n\n 
if order.status == Order.STATUS_FILLED:\n break\n\n @staticmethod\n def process_order(self, order: Order ):\n if order.status == Order.STATUS_WAITING_NEW:\n order.status = Order.STATUS_NEW\n order.save()\n\n# best_bid_price, best_ask_price = self.get_bid_ask(order.market)\n if order.price == 0:\n Matching.take(order)\n order.status = Order.STATUS_FILLED\n order.save()\n if order.price != 0:\n Matching.take(order)\n\n\n\n\nclass TestOrder(TestCase):\n\n def setUp(self) -> None:\n self.ticker1 = Ticker.objects.create(name=\"USD\")\n self.ticker2 = Ticker.objects.create(name=\"EUR\")\n self.market = Market.objects.create(name=\"USD/EUR\",\n base_currency=self.ticker1,\n quote_currency=self.ticker2)\n\n self.Alice = USER_MODEL.objects.create_user(username=\"Alice\", email=\"alice@yy.ru\", password=\"hhhh\")\n self.Bob = USER_MODEL.objects.create_user(username=\"Bob\", email=\"bob@yy.ru\", password=\"hhhh\")\n\n # Creating Alice orders\n for i in range(20):\n Order.objects.create(sender=self.Alice,\n side=Order.SIDES_SELL,\n price=10000 + i*100,\n size=100000+i*10000,\n filled=0,\n status=Order.STATUS_WAITING_NEW,\n hash_signature=\"SIGA\",\n market=self.market)\n\n # Creating Alice orders\n for i in range(20):\n Order.objects.create(sender=self.Alice,\n side=Order.SIDES_BUY,\n price=10000 - i*100,\n size=100000+i*10000,\n filled=0,\n status=Order.STATUS_WAITING_NEW,\n hash_signature=\"SIGA\",\n market=self.market)\n\n # Creating Bob orders\n for i in range(2):\n Order.objects.create(sender=self.Bob,\n side=Order.SIDES_BUY,\n price=0,\n size=100000+i*10000,\n filled=0,\n status=Order.STATUS_WAITING_NEW,\n hash_signature=\"SIGA\",\n market=self.market)\n\n\n def test_market_exists(self):\n assert Market.objects.all().count() == 1\n\n def test_orders_created(self):\n #assert Market.objects.all()[0].order_set.count() == 40\n print(\"---BID----\")\n for order in Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY).exclude(price=0).order_by(\"-price\", 
\"created_at\"):\n print(order.price, order.size)\n\n print(\"---ASK----\")\n for order in Market.objects.all()[0].order_set.filter(side=Order.SIDES_SELL).exclude(price=0).order_by(\"price\", \"created_at\"):\n print(order.price, order.size)\n\n\n\n def test_get_level_1(self):\n bid = Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY, status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by(\"-price\")\n ask = Market.objects.all()[0].order_set.filter(side=Order.SIDES_SELL, status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by(\"price\")\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n\n print(f'Bid {bid_price} Ask {ask_price}')\n\n",
"step-ids": [
7,
8,
9,
11,
12
]
}
|
[
7,
8,
9,
11,
12
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: André Pacheco
E-mail: pacheco.comp@gmail.com
This file implements the methods and functions to load images as a PyTorch dataset
If you find any bug or have some suggestion, please, email me.
"""
from PIL import Image
from torch.utils import data
import torchvision.transforms as transforms
class BuildDataset(data.Dataset):
    """PyTorch dataset over a list of image paths with labels and extra info.

    Implements the standard torch.utils.data.Dataset protocol (``__init__``,
    ``__len__``, ``__getitem__``). Entries pair positionally: ``imgs_path[i]``
    goes with ``labels[i]`` and, when supplied, ``extra_info[i]``.
    """

    def __init__(self, imgs_path, labels, extra_info=None, transform=None):
        """Store the sample lists and the transform pipeline.

        :param imgs_path: list of image file paths (strings).
        :param labels: list with one label per image.
        :param extra_info: optional list of extra information per image;
            None means no extra information. Default is None.
        :param transform: torchvision transform pipeline applied to each
            image. When None, a Resize((224, 224)) + ToTensor fallback is
            installed so ``__getitem__`` always yields a tensor.
        """
        self.imgs_path = imgs_path
        self.labels = labels
        self.extra_info = extra_info
        if transform is None:
            # Without ToTensor the loader would emit raw PIL images and
            # downstream code would raise.
            self.transform = transforms.Compose([
                transforms.Resize((224, 224)),
                transforms.ToTensor()
            ])
        else:
            self.transform = transform

    def __len__(self):
        """Return the number of samples in the dataset."""
        return len(self.imgs_path)

    def __getitem__(self, item):
        """Load, transform and return the sample at index ``item``.

        :param item: index in the interval [0, len(self) - 1].
        :return: tuple (image, label, extra_info, img_name) where label and
            extra_info fall back to [] when their list is None, and img_name
            is the file name without directory or extension.
        """
        img = Image.open(self.imgs_path[item]).convert("RGB")
        img = self.transform(img)
        img_name = self.imgs_path[item].split('/')[-1].split('.')[0]
        extra_info = [] if self.extra_info is None else self.extra_info[item]
        labels = [] if self.labels is None else self.labels[item]
        return img, labels, extra_info, img_name
def get_data_loader (imgs_path, labels, extra_info=None, transform=None, params=None):
    """
    Build a torch.utils.data.DataLoader over the given images, labels and
    extra information (if it exists). You can set transformations using
    torchvision.transforms in order to perform data augmentation.

    :param imgs_path (list): a list of string containing the images path
    :param labels (list): a list of labels for each image
    :param extra_info (list, optional): a list of extra information regarding each image. If it's None, it means there's
    no extra information. Default is None
    :param transform (torchvision.transforms, optional): use the torchvision.transforms.compose to perform the data
    augmentation for the dataset. Alternatively, you can use the jedy.pytorch.utils.augmentation to perform the
    augmentation. If it's None, none augmentation will be perform. Default is None
    :param params (dictionary, optional): loader configuration. Any missing key
    (or params=None) falls back to the defaults below:
        batch_size (int): the batch size. Default is 30
        shuf (bool): whether to shuffle the dataset. Default is True
        num_workers (int): number of CPU worker threads. Default is 4
        pin_memory (bool): preload batches into pinned (page-locked) memory
        for faster host-to-GPU transfer. Default is True
    :return (torch.utils.data.DataLoader): a dataloader with the dataset and the chosen params
    """

    dt = BuildDataset(imgs_path, labels, extra_info, transform)

    # Defaults for the loader configuration; any key present in `params`
    # overrides the corresponding default.
    if params is None:
        params = {}
    batch_size = params.get('batch_size', 30)
    shuf = params.get('shuf', True)
    num_workers = params.get('num_workers', 4)
    pin_memory = params.get('pin_memory', True)

    # Calling the dataloader
    dl = data.DataLoader (dataset=dt, batch_size=batch_size, shuffle=shuf, num_workers=num_workers,
                          pin_memory=pin_memory)

    return dl
|
normal
|
{
"blob_id": "4e31c2a80bec77a1f5aafc8a91617fb4b2941788",
"index": 432,
"step-1": "<mask token>\n\n\nclass BuildDataset(data.Dataset):\n <mask token>\n\n def __init__(self, imgs_path, labels, extra_info=None, transform=None):\n \"\"\"\n The constructor gets the images path and their respectively labels and extra information (if it exists).\n In addition, you can specify some transform operation to be carry out on the images.\n\n It's important to note the images must match with the labels (an extra information if exist). For example, the\n imgs_path[x]'s label must take place on labels[x].\n\n Parameters:\n :param imgs_path (list): a list of string containing the image paths\n :param labels (list) a list of labels for each image\n :param extra_info (list): a list of extra information regarding each image. If None, there is no information.\n Defaul is None.\n :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images\n \"\"\"\n self.imgs_path = imgs_path\n self.labels = labels\n self.extra_info = extra_info\n if transform is not None:\n self.transform = transform\n else:\n self.transform = transforms.Compose([transforms.Resize((224, \n 224)), transforms.ToTensor()])\n\n def __len__(self):\n \"\"\" This method just returns the dataset size \"\"\"\n return len(self.imgs_path)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BuildDataset(data.Dataset):\n \"\"\"\n This the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset\n class and implement the following methods: __len__, __getitem__ and the constructor __init__\n \"\"\"\n\n def __init__(self, imgs_path, labels, extra_info=None, transform=None):\n \"\"\"\n The constructor gets the images path and their respectively labels and extra information (if it exists).\n In addition, you can specify some transform operation to be carry out on the images.\n\n It's important to note the images must match with the labels (an extra information if exist). For example, the\n imgs_path[x]'s label must take place on labels[x].\n\n Parameters:\n :param imgs_path (list): a list of string containing the image paths\n :param labels (list) a list of labels for each image\n :param extra_info (list): a list of extra information regarding each image. If None, there is no information.\n Defaul is None.\n :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images\n \"\"\"\n self.imgs_path = imgs_path\n self.labels = labels\n self.extra_info = extra_info\n if transform is not None:\n self.transform = transform\n else:\n self.transform = transforms.Compose([transforms.Resize((224, \n 224)), transforms.ToTensor()])\n\n def __len__(self):\n \"\"\" This method just returns the dataset size \"\"\"\n return len(self.imgs_path)\n\n def __getitem__(self, item):\n \"\"\"\n It gets the image, labels and extra information (if it exists) according to the index informed in `item`.\n It also performs the transform on the image.\n\n :param item (int): an index in the interval [0, ..., len(img_paths)-1]\n :return (tuple): a tuple containing the image, its label and extra information (if it exists)\n \"\"\"\n image = Image.open(self.imgs_path[item]).convert('RGB')\n image = self.transform(image)\n img_name = 
self.imgs_path[item].split('/')[-1].split('.')[0]\n if self.extra_info is None:\n extra_info = []\n else:\n extra_info = self.extra_info[item]\n if self.labels is None:\n labels = []\n else:\n labels = self.labels[item]\n return image, labels, extra_info, img_name\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BuildDataset(data.Dataset):\n \"\"\"\n This the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset\n class and implement the following methods: __len__, __getitem__ and the constructor __init__\n \"\"\"\n\n def __init__(self, imgs_path, labels, extra_info=None, transform=None):\n \"\"\"\n The constructor gets the images path and their respectively labels and extra information (if it exists).\n In addition, you can specify some transform operation to be carry out on the images.\n\n It's important to note the images must match with the labels (an extra information if exist). For example, the\n imgs_path[x]'s label must take place on labels[x].\n\n Parameters:\n :param imgs_path (list): a list of string containing the image paths\n :param labels (list) a list of labels for each image\n :param extra_info (list): a list of extra information regarding each image. If None, there is no information.\n Defaul is None.\n :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images\n \"\"\"\n self.imgs_path = imgs_path\n self.labels = labels\n self.extra_info = extra_info\n if transform is not None:\n self.transform = transform\n else:\n self.transform = transforms.Compose([transforms.Resize((224, \n 224)), transforms.ToTensor()])\n\n def __len__(self):\n \"\"\" This method just returns the dataset size \"\"\"\n return len(self.imgs_path)\n\n def __getitem__(self, item):\n \"\"\"\n It gets the image, labels and extra information (if it exists) according to the index informed in `item`.\n It also performs the transform on the image.\n\n :param item (int): an index in the interval [0, ..., len(img_paths)-1]\n :return (tuple): a tuple containing the image, its label and extra information (if it exists)\n \"\"\"\n image = Image.open(self.imgs_path[item]).convert('RGB')\n image = self.transform(image)\n img_name = 
self.imgs_path[item].split('/')[-1].split('.')[0]\n if self.extra_info is None:\n extra_info = []\n else:\n extra_info = self.extra_info[item]\n if self.labels is None:\n labels = []\n else:\n labels = self.labels[item]\n return image, labels, extra_info, img_name\n\n\ndef get_data_loader(imgs_path, labels, extra_info=None, transform=None,\n params=None):\n \"\"\"\n This function gets a list og images path, their labels and extra information (if it exists) and returns a DataLoader\n for these files. You also can set some transformations using torchvision.transforms in order to perform data\n augmentation. Lastly, params is a dictionary that you can set the following parameters:\n batch_size (int): the batch size for the dataset. If it's not informed the default is 30\n shuf (bool): set it true if wanna shuffe the dataset. If it's not informed the default is True\n num_workers (int): the number thread in CPU to load the dataset. If it's not informed the default is 0 (which\n\n\n :param imgs_path (list): a list of string containing the images path\n :param labels (list): a list of labels for each image\n :param extra_info (list, optional): a list of extra information regarding each image. If it's None, it means there's\n no extra information. Default is None\n :param transform (torchvision.transforms, optional): use the torchvision.transforms.compose to perform the data\n augmentation for the dataset. Alternatively, you can use the jedy.pytorch.utils.augmentation to perform the\n augmentation. If it's None, none augmentation will be perform. Default is None\n :param params (dictionary, optional): this dictionary contains the following parameters:\n batch_size: the batch size. If the key is not informed or params = None, the default value will be 30\n shuf: if you'd like to shuffle the dataset. If the key is not informed or params = None,\n the default value will be True\n num_workers: the number of threads to be used in CPU. 
If the key is not informed or params = None, the default\n value will be 4\n pin_memory = set it to True to Pytorch preload the images on GPU. If the key is not informed or params = None,\n the default value will be True\n :return (torch.utils.data.DataLoader): a dataloader with the dataset and the chose params\n \"\"\"\n dt = BuildDataset(imgs_path, labels, extra_info, transform)\n batch_size = 30\n shuf = True\n num_workers = 4\n pin_memory = True\n if params is not None:\n if 'batch_size' in params.keys():\n batch_size = params['batch_size']\n if 'shuf' in params.keys():\n shuf = params['shuf']\n if 'num_workers' in params.keys():\n num_workers = params['num_workers']\n if 'pin_memory' in params.keys():\n pin_memory = params['pin_memory']\n dl = data.DataLoader(dataset=dt, batch_size=batch_size, shuffle=shuf,\n num_workers=num_workers, pin_memory=pin_memory)\n return dl\n",
"step-4": "<mask token>\nfrom PIL import Image\nfrom torch.utils import data\nimport torchvision.transforms as transforms\n\n\nclass BuildDataset(data.Dataset):\n \"\"\"\n This the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset\n class and implement the following methods: __len__, __getitem__ and the constructor __init__\n \"\"\"\n\n def __init__(self, imgs_path, labels, extra_info=None, transform=None):\n \"\"\"\n The constructor gets the images path and their respectively labels and extra information (if it exists).\n In addition, you can specify some transform operation to be carry out on the images.\n\n It's important to note the images must match with the labels (an extra information if exist). For example, the\n imgs_path[x]'s label must take place on labels[x].\n\n Parameters:\n :param imgs_path (list): a list of string containing the image paths\n :param labels (list) a list of labels for each image\n :param extra_info (list): a list of extra information regarding each image. 
If None, there is no information.\n Defaul is None.\n :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images\n \"\"\"\n self.imgs_path = imgs_path\n self.labels = labels\n self.extra_info = extra_info\n if transform is not None:\n self.transform = transform\n else:\n self.transform = transforms.Compose([transforms.Resize((224, \n 224)), transforms.ToTensor()])\n\n def __len__(self):\n \"\"\" This method just returns the dataset size \"\"\"\n return len(self.imgs_path)\n\n def __getitem__(self, item):\n \"\"\"\n It gets the image, labels and extra information (if it exists) according to the index informed in `item`.\n It also performs the transform on the image.\n\n :param item (int): an index in the interval [0, ..., len(img_paths)-1]\n :return (tuple): a tuple containing the image, its label and extra information (if it exists)\n \"\"\"\n image = Image.open(self.imgs_path[item]).convert('RGB')\n image = self.transform(image)\n img_name = self.imgs_path[item].split('/')[-1].split('.')[0]\n if self.extra_info is None:\n extra_info = []\n else:\n extra_info = self.extra_info[item]\n if self.labels is None:\n labels = []\n else:\n labels = self.labels[item]\n return image, labels, extra_info, img_name\n\n\ndef get_data_loader(imgs_path, labels, extra_info=None, transform=None,\n params=None):\n \"\"\"\n This function gets a list og images path, their labels and extra information (if it exists) and returns a DataLoader\n for these files. You also can set some transformations using torchvision.transforms in order to perform data\n augmentation. Lastly, params is a dictionary that you can set the following parameters:\n batch_size (int): the batch size for the dataset. If it's not informed the default is 30\n shuf (bool): set it true if wanna shuffe the dataset. If it's not informed the default is True\n num_workers (int): the number thread in CPU to load the dataset. 
If it's not informed the default is 0 (which\n\n\n :param imgs_path (list): a list of string containing the images path\n :param labels (list): a list of labels for each image\n :param extra_info (list, optional): a list of extra information regarding each image. If it's None, it means there's\n no extra information. Default is None\n :param transform (torchvision.transforms, optional): use the torchvision.transforms.compose to perform the data\n augmentation for the dataset. Alternatively, you can use the jedy.pytorch.utils.augmentation to perform the\n augmentation. If it's None, none augmentation will be perform. Default is None\n :param params (dictionary, optional): this dictionary contains the following parameters:\n batch_size: the batch size. If the key is not informed or params = None, the default value will be 30\n shuf: if you'd like to shuffle the dataset. If the key is not informed or params = None,\n the default value will be True\n num_workers: the number of threads to be used in CPU. If the key is not informed or params = None, the default\n value will be 4\n pin_memory = set it to True to Pytorch preload the images on GPU. If the key is not informed or params = None,\n the default value will be True\n :return (torch.utils.data.DataLoader): a dataloader with the dataset and the chose params\n \"\"\"\n dt = BuildDataset(imgs_path, labels, extra_info, transform)\n batch_size = 30\n shuf = True\n num_workers = 4\n pin_memory = True\n if params is not None:\n if 'batch_size' in params.keys():\n batch_size = params['batch_size']\n if 'shuf' in params.keys():\n shuf = params['shuf']\n if 'num_workers' in params.keys():\n num_workers = params['num_workers']\n if 'pin_memory' in params.keys():\n pin_memory = params['pin_memory']\n dl = data.DataLoader(dataset=dt, batch_size=batch_size, shuffle=shuf,\n num_workers=num_workers, pin_memory=pin_memory)\n return dl\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: André Pacheco\nE-mail: pacheco.comp@gmail.com\n\nThis file implements the methods and functions to load the image as a PyTorch dataset\n\nIf you find any bug or have some suggestion, please, email me.\n\"\"\"\n\nfrom PIL import Image\nfrom torch.utils import data\nimport torchvision.transforms as transforms\n\n\nclass BuildDataset (data.Dataset):\n \"\"\"\n This the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset\n class and implement the following methods: __len__, __getitem__ and the constructor __init__\n \"\"\"\n\n def __init__(self, imgs_path, labels, extra_info=None, transform=None):\n \"\"\"\n The constructor gets the images path and their respectively labels and extra information (if it exists).\n In addition, you can specify some transform operation to be carry out on the images.\n\n It's important to note the images must match with the labels (an extra information if exist). For example, the\n imgs_path[x]'s label must take place on labels[x].\n\n Parameters:\n :param imgs_path (list): a list of string containing the image paths\n :param labels (list) a list of labels for each image\n :param extra_info (list): a list of extra information regarding each image. 
If None, there is no information.\n Defaul is None.\n :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images\n \"\"\"\n\n self.imgs_path = imgs_path\n self.labels = labels\n self.extra_info = extra_info\n\n # if transform is None, we need to ensure that the PIL image will be transformed to tensor, otherwise we'll got\n # an exception\n if (transform is not None):\n self.transform = transform\n else:\n self.transform = transforms.Compose([\n transforms.Resize((224,224)),\n transforms.ToTensor()\n ])\n\n def __len__(self):\n \"\"\" This method just returns the dataset size \"\"\"\n return len(self.imgs_path)\n\n def __getitem__(self, item):\n \"\"\"\n It gets the image, labels and extra information (if it exists) according to the index informed in `item`.\n It also performs the transform on the image.\n\n :param item (int): an index in the interval [0, ..., len(img_paths)-1]\n :return (tuple): a tuple containing the image, its label and extra information (if it exists)\n \"\"\"\n\n image = Image.open(self.imgs_path[item]).convert(\"RGB\")\n\n # Applying the transformations\n image = self.transform(image)\n\n img_name = self.imgs_path[item].split('/')[-1].split('.')[0]\n # print(self.labels[item])\n # print(self.extra_info[item])\n\n if self.extra_info is None:\n extra_info = []\n else:\n extra_info = self.extra_info[item]\n\n if self.labels is None:\n labels = []\n else:\n labels = self.labels[item]\n\n return image, labels, extra_info, img_name\n\n\ndef get_data_loader (imgs_path, labels, extra_info=None, transform=None, params=None):\n \"\"\"\n This function gets a list og images path, their labels and extra information (if it exists) and returns a DataLoader\n for these files. You also can set some transformations using torchvision.transforms in order to perform data\n augmentation. Lastly, params is a dictionary that you can set the following parameters:\n batch_size (int): the batch size for the dataset. 
If it's not informed the default is 30\n shuf (bool): set it true if wanna shuffe the dataset. If it's not informed the default is True\n num_workers (int): the number thread in CPU to load the dataset. If it's not informed the default is 0 (which\n\n\n :param imgs_path (list): a list of string containing the images path\n :param labels (list): a list of labels for each image\n :param extra_info (list, optional): a list of extra information regarding each image. If it's None, it means there's\n no extra information. Default is None\n :param transform (torchvision.transforms, optional): use the torchvision.transforms.compose to perform the data\n augmentation for the dataset. Alternatively, you can use the jedy.pytorch.utils.augmentation to perform the\n augmentation. If it's None, none augmentation will be perform. Default is None\n :param params (dictionary, optional): this dictionary contains the following parameters:\n batch_size: the batch size. If the key is not informed or params = None, the default value will be 30\n shuf: if you'd like to shuffle the dataset. If the key is not informed or params = None,\n the default value will be True\n num_workers: the number of threads to be used in CPU. If the key is not informed or params = None, the default\n value will be 4\n pin_memory = set it to True to Pytorch preload the images on GPU. If the key is not informed or params = None,\n the default value will be True\n :return (torch.utils.data.DataLoader): a dataloader with the dataset and the chose params\n \"\"\"\n\n\n dt = BuildDataset(imgs_path, labels, extra_info, transform)\n\n # Checking the params values. 
If it's not defined in params of if params is None, the default values are described\n # below:\n batch_size = 30\n shuf = True\n num_workers = 4\n pin_memory = True\n\n # However, if the params is defined, we used the values described on it:\n if (params is not None):\n if ('batch_size' in params.keys()):\n batch_size = params['batch_size']\n if ('shuf' in params.keys()):\n shuf = params['shuf']\n if ('num_workers' in params.keys()):\n num_workers = params['num_workers']\n if ('pin_memory' in params.keys()):\n pin_memory = params['pin_memory']\n\n # Calling the dataloader\n dl = data.DataLoader (dataset=dt, batch_size=batch_size, shuffle=shuf, num_workers=num_workers,\n pin_memory=pin_memory)\n\n return dl\n\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
inputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,
1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1), (
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,
1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1), (0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -
1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1,
-1), (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,
1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1),
(0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1,
1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1
), (0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -
1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1),
(0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1),
(0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((-1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1,
-1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,
-1, -1), (0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -
1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,
1), (0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -
1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1,
1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -
1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -
1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1,
-1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1,
-1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,
1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1), (
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1,
1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -
1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1,
1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1,
-1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1,
-1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0))
inputDataSet.addSample((-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1,
-1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1,
-1, 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0))
inputDataSet.addSample((-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1,
-1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -
1, -1, -1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
inputDataSet = SupervisedDataSet(35, 20)
inputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,
1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1), (
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,
1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1), (0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -
1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1,
-1), (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,
1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1),
(0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1,
1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1
), (0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -
1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1),
(0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1),
(0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((-1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1,
-1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,
-1, -1), (0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -
1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,
1), (0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -
1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1,
1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -
1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -
1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1,
-1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1,
-1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,
1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1), (
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1,
1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -
1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1,
1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1,
-1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1,
-1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0))
inputDataSet.addSample((-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1,
-1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1,
-1, 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0))
inputDataSet.addSample((-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1,
-1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -
1, -1, -1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))
<|reserved_special_token_1|>
from pybrain3.datasets import SupervisedDataSet
inputDataSet = SupervisedDataSet(35, 20)
inputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,
1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1), (
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,
1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1), (0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -
1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1,
-1), (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,
1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1),
(0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1,
1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1
), (0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -
1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1),
(0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1),
(0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((-1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1,
-1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,
-1, -1), (0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -
1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,
1), (0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -
1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1,
1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -
1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -
1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1,
-1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1,
-1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,
1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1), (
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1,
1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -
1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1,
1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0))
inputDataSet.addSample((1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1,
-1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1,
-1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0))
inputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0))
inputDataSet.addSample((-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1,
-1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1,
-1, 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0))
inputDataSet.addSample((-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1,
-1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -
1, -1, -1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))
<|reserved_special_token_1|>
from pybrain3.datasets import SupervisedDataSet
inputDataSet = SupervisedDataSet(35, 20) #Creating new DataSet
#A
inputDataSet.addSample(( #Adding first sample to dataset
-1, 1, 1, 1, -1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, 1, 1, 1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1
),
(1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0))
#B
inputDataSet.addSample((
1, 1, 1, 1, -1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, 1, 1, 1, -1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, 1, 1, 1, -1
),
(0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
#C
inputDataSet.addSample((
-1, 1, 1, 1, -1,
1, -1, -1, -1, 1,
1, -1, -1, -1, -1,
1, -1, -1, -1, -1,
1, -1, -1, -1, -1,
1, -1, -1, -1, 1,
-1, 1, 1, 1, -1
),
(0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
#D
inputDataSet.addSample((
1, 1, 1, 1, -1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, 1, 1, 1, -1
),
(0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
#F
inputDataSet.addSample((
1, 1, 1, 1, 1,
1, -1, -1, -1, -1,
1, -1, -1, -1, -1,
1, 1, 1, 1, -1,
1, -1, -1, -1, -1,
1, -1, -1, -1, -1,
1, -1, -1, -1, -1
),
(0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
#G
inputDataSet.addSample((
-1, 1, 1, 1, -1,
1, -1, -1, -1, 1,
1, -1, -1, -1, -1,
1, -1, 1, 1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
-1, 1, 1, 1, -1
),
(0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
#H
inputDataSet.addSample((
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, 1, 1, 1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1
),
(0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
#I
inputDataSet.addSample((
-1, -1, 1, -1, -1,
-1, -1, 1, -1, -1,
-1, -1, 1, -1, -1,
-1, -1, 1, -1, -1,
-1, -1, 1, -1, -1,
-1, -1, 1, -1, -1,
-1, -1, 1, -1, -1
),
(0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
#K
inputDataSet.addSample((
1, -1, -1, -1, 1,
1, -1, -1, 1, -1,
1, -1, 1, -1, -1,
1, 1, -1, -1, -1,
1, -1, 1, -1, -1,
1, -1, -1, 1, -1,
1, -1, -1, -1, 1
),
(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
#U
inputDataSet.addSample((
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
-1, 1, 1, 1, -1
),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
#M
inputDataSet.addSample((
1, -1, -1, -1, 1,
1, 1, -1, 1, 1,
1, -1, 1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1
),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0))
#E
inputDataSet.addSample((
1, 1, 1, 1, -1,
1, -1, -1, -1, -1,
1, -1, -1, -1, -1,
1, 1, 1, -1, -1,
1, -1, -1, -1, -1,
1, -1, -1, -1, -1,
1, 1, 1, 1, -1
),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0))
#L
inputDataSet.addSample((
1, -1, -1, -1, -1,
1, -1, -1, -1, -1,
1, -1, -1, -1, -1,
1, -1, -1, -1, -1,
1, -1, -1, -1, -1,
1, -1, -1, -1, -1,
1, 1, 1, 1, -1
),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0))
#O
inputDataSet.addSample((
1, 1, 1, 1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, 1, 1, 1, 1
),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0))
#P
inputDataSet.addSample((
1, 1, 1, -1, -1,
1, -1, -1, 1, -1,
1, -1, -1, 1, -1,
1, 1, 1, -1, -1,
1, -1, -1, -1, -1,
1, -1, -1, -1, -1,
1, -1, -1, -1, -1
),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0))
#R
inputDataSet.addSample((
1, 1, 1, -1, -1,
1, -1, -1, 1, -1,
1, -1, -1, 1, -1,
1, 1, 1, -1, -1,
1, -1, 1, -1, -1,
1, -1, -1, 1, -1,
1, -1, -1, -1, 1
),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0))
#T
inputDataSet.addSample((
1, 1, 1, 1, 1,
-1, -1, 1, -1, -1,
-1, -1, 1, -1, -1,
-1, -1, 1, -1, -1,
-1, -1, 1, -1, -1,
-1, -1, 1, -1, -1,
-1, -1, 1, -1, -1
),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0))
#W
inputDataSet.addSample((
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, 1, -1, 1,
1, 1, -1, 1, 1,
1, -1, -1, -1, 1
),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0))
#X
inputDataSet.addSample((
-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1,
1, -1, -1, -1, 1,
-1, 1, -1, 1, -1,
-1, -1, 1, -1, -1,
-1, 1, -1, 1, -1,
1, -1, -1, -1, 1
),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0))
#Y
inputDataSet.addSample((
-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1,
1, -1, -1, -1, 1,
-1, 1, -1, 1, -1,
-1, -1, 1, -1, -1,
-1, 1, -1, -1, -1,
1, -1, -1, -1, -1
),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))
|
flexible
|
{
"blob_id": "a2569ccd509fa755f4cad026f483bcf891c6fb41",
"index": 8120,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ninputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,\n 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1), (\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,\n 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1), (0,\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -\n 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1,\n -1), (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1),\n (0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1,\n 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1\n ), (0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -\n 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1),\n (0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, \n 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1),\n (0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((-1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1,\n -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,\n -1, -1), (0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -\n 1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,\n 1), (0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0))\ninputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, \n 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -\n 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, \n 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -\n 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -\n 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1,\n -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1,\n -1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, \n 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1), (\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1,\n 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -\n 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1,\n 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1,\n -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1,\n -1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, \n 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0))\ninputDataSet.addSample((-1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, \n -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1,\n -1, 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0))\ninputDataSet.addSample((-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, \n -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -\n 1, -1, -1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n",
"step-3": "<mask token>\ninputDataSet = SupervisedDataSet(35, 20)\ninputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,\n 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1), (\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,\n 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1), (0,\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -\n 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1,\n -1), (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1),\n (0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1,\n 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1\n ), (0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -\n 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1),\n (0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, \n 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1),\n (0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((-1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1,\n -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,\n -1, -1), (0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -\n 1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,\n 1), (0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, \n 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -\n 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, \n 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -\n 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -\n 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1,\n -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1,\n -1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, \n 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1), (\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1,\n 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -\n 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1,\n 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1,\n -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1,\n -1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, \n 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 
0))\ninputDataSet.addSample((-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, \n -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1,\n -1, 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0))\ninputDataSet.addSample((-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, \n -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -\n 1, -1, -1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n",
"step-4": "from pybrain3.datasets import SupervisedDataSet\ninputDataSet = SupervisedDataSet(35, 20)\ninputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,\n 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1), (\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,\n 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1), (0,\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -\n 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1,\n -1), (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1),\n (0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1,\n 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1\n ), (0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -\n 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1),\n (0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, \n 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1),\n (0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((-1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1,\n -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,\n -1, -1), (0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -\n 1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,\n 1), (0, 
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, \n 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -\n 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, \n 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -\n 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -\n 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1,\n -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1,\n -1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, \n 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1), (\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1,\n 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -\n 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1,\n 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0))\ninputDataSet.addSample((1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1,\n -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1,\n -1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0))\ninputDataSet.addSample((1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, \n 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 1, 0, 0))\ninputDataSet.addSample((-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, \n -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1,\n -1, 1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0))\ninputDataSet.addSample((-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, \n -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -\n 1, -1, -1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))\n",
"step-5": "from pybrain3.datasets import SupervisedDataSet\n\ninputDataSet = SupervisedDataSet(35, 20) #Creating new DataSet\n\n#A\ninputDataSet.addSample(( #Adding first sample to dataset\n -1, 1, 1, 1, -1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, 1, 1, 1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1\n ),\n (1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0))\n\n#B\ninputDataSet.addSample((\n 1, 1, 1, 1, -1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, 1, 1, 1, -1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, 1, 1, 1, -1\n ),\n (0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\n\n#C\ninputDataSet.addSample((\n -1, 1, 1, 1, -1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, -1,\n 1, -1, -1, -1, -1,\n 1, -1, -1, -1, -1,\n 1, -1, -1, -1, 1,\n -1, 1, 1, 1, -1\n ),\n (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\n\n#D\ninputDataSet.addSample((\n 1, 1, 1, 1, -1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, 1, 1, 1, -1\n ),\n (0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\n\n#F\ninputDataSet.addSample((\n 1, 1, 1, 1, 1,\n 1, -1, -1, -1, -1,\n 1, -1, -1, -1, -1,\n 1, 1, 1, 1, -1,\n 1, -1, -1, -1, -1,\n 1, -1, -1, -1, -1,\n 1, -1, -1, -1, -1\n ),\n (0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\n\n#G\ninputDataSet.addSample((\n -1, 1, 1, 1, -1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, -1,\n 1, -1, 1, 1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n -1, 1, 1, 1, -1\n ),\n (0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\n\n#H\ninputDataSet.addSample((\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, 1, 1, 1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1\n ),\n (0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\n\n#I\ninputDataSet.addSample((\n -1, -1, 1, -1, -1,\n -1, -1, 1, -1, -1,\n -1, -1, 1, -1, -1,\n -1, -1, 1, -1, -1,\n -1, -1, 1, -1, -1,\n -1, -1, 1, -1, -1,\n -1, -1, 1, -1, -1\n ),\n (0, 0, 0, 0, 0, 
0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\n\n#K\ninputDataSet.addSample((\n 1, -1, -1, -1, 1,\n 1, -1, -1, 1, -1,\n 1, -1, 1, -1, -1,\n 1, 1, -1, -1, -1,\n 1, -1, 1, -1, -1,\n 1, -1, -1, 1, -1,\n 1, -1, -1, -1, 1\n ),\n (0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\n\n#U\ninputDataSet.addSample((\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n -1, 1, 1, 1, -1\n ),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))\n\n#M\ninputDataSet.addSample((\n 1, -1, -1, -1, 1,\n 1, 1, -1, 1, 1,\n 1, -1, 1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1\n ),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0))\n\n#E\ninputDataSet.addSample((\n 1, 1, 1, 1, -1,\n 1, -1, -1, -1, -1,\n 1, -1, -1, -1, -1,\n 1, 1, 1, -1, -1,\n 1, -1, -1, -1, -1,\n 1, -1, -1, -1, -1,\n 1, 1, 1, 1, -1\n ),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0))\n\n#L\ninputDataSet.addSample((\n 1, -1, -1, -1, -1,\n 1, -1, -1, -1, -1,\n 1, -1, -1, -1, -1,\n 1, -1, -1, -1, -1,\n 1, -1, -1, -1, -1,\n 1, -1, -1, -1, -1,\n 1, 1, 1, 1, -1\n ),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0))\n\n#O\ninputDataSet.addSample((\n 1, 1, 1, 1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, 1, 1, 1, 1\n ),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0))\n\n#P\ninputDataSet.addSample((\n 1, 1, 1, -1, -1,\n 1, -1, -1, 1, -1,\n 1, -1, -1, 1, -1,\n 1, 1, 1, -1, -1,\n 1, -1, -1, -1, -1,\n 1, -1, -1, -1, -1,\n 1, -1, -1, -1, -1\n ),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0))\n\n#R\ninputDataSet.addSample((\n 1, 1, 1, -1, -1,\n 1, -1, -1, 1, -1,\n 1, -1, -1, 1, -1,\n 1, 1, 1, -1, -1,\n 1, -1, 1, -1, -1,\n 1, -1, -1, 1, -1,\n 1, -1, -1, -1, 1\n ),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0))\n\n#T\ninputDataSet.addSample((\n 1, 1, 
1, 1, 1,\n -1, -1, 1, -1, -1,\n -1, -1, 1, -1, -1,\n -1, -1, 1, -1, -1,\n -1, -1, 1, -1, -1,\n -1, -1, 1, -1, -1,\n -1, -1, 1, -1, -1\n ),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0))\n\n#W\ninputDataSet.addSample((\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, -1, -1, 1,\n 1, -1, 1, -1, 1,\n 1, 1, -1, 1, 1,\n 1, -1, -1, -1, 1\n ),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0))\n\n#X\ninputDataSet.addSample((\n -1, -1, -1, -1, -1,\n -1, -1, -1, -1, -1,\n 1, -1, -1, -1, 1,\n -1, 1, -1, 1, -1,\n -1, -1, 1, -1, -1,\n -1, 1, -1, 1, -1,\n 1, -1, -1, -1, 1\n ),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0))\n\n#Y\ninputDataSet.addSample((\n -1, -1, -1, -1, -1,\n -1, -1, -1, -1, -1,\n 1, -1, -1, -1, 1,\n -1, 1, -1, 1, -1,\n -1, -1, 1, -1, -1,\n -1, 1, -1, -1, -1,\n 1, -1, -1, -1, -1\n ),\n (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
win.show()
app.exec_()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = qt.QApplication([])
win = mainWindow.RIXSMainWindow()
win.show()
app.exec_()
<|reserved_special_token_1|>
from PyMca5.PyMcaGui import PyMcaQt as qt
from RixsTool import mainWindow
app = qt.QApplication([])
win = mainWindow.RIXSMainWindow()
win.show()
app.exec_()
<|reserved_special_token_1|>
#!/usr/bin/python
from PyMca5.PyMcaGui import PyMcaQt as qt
from RixsTool import mainWindow
app = qt.QApplication([])
win = mainWindow.RIXSMainWindow()
win.show()
app.exec_()
|
flexible
|
{
"blob_id": "34c8541e640596f51a5232cba06172df5814db14",
"index": 7734,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwin.show()\napp.exec_()\n",
"step-3": "<mask token>\napp = qt.QApplication([])\nwin = mainWindow.RIXSMainWindow()\nwin.show()\napp.exec_()\n",
"step-4": "from PyMca5.PyMcaGui import PyMcaQt as qt\nfrom RixsTool import mainWindow\napp = qt.QApplication([])\nwin = mainWindow.RIXSMainWindow()\nwin.show()\napp.exec_()\n",
"step-5": "#!/usr/bin/python\n\nfrom PyMca5.PyMcaGui import PyMcaQt as qt\nfrom RixsTool import mainWindow\napp = qt.QApplication([])\nwin = mainWindow.RIXSMainWindow()\nwin.show()\napp.exec_()\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
class Role:
"""
角色类
卧底
平民
"""
def __init__(self,key_word="",role_id = 0):
self.key_word = key_word
self.role_id = role_id #平民-0;卧底-1;
class User(Role):
"""
用户类
玩家
"""
def __init__(self,id,role_id):
self.id = id #玩家id
self.role_id = role_id
|
normal
|
{
"blob_id": "3b5141a86948df6632612f6c9d7fc0089acc60aa",
"index": 5981,
"step-1": "<mask token>\n\n\nclass Role:\n <mask token>\n <mask token>\n\n\nclass User(Role):\n \"\"\"\n 用户类\n 玩家\n \"\"\"\n\n def __init__(self, id, role_id):\n self.id = id\n self.role_id = role_id\n",
"step-2": "<mask token>\n\n\nclass Role:\n <mask token>\n\n def __init__(self, key_word='', role_id=0):\n self.key_word = key_word\n self.role_id = role_id\n\n\nclass User(Role):\n \"\"\"\n 用户类\n 玩家\n \"\"\"\n\n def __init__(self, id, role_id):\n self.id = id\n self.role_id = role_id\n",
"step-3": "<mask token>\n\n\nclass Role:\n \"\"\"\n 角色类\n 卧底\n 平民\n \"\"\"\n\n def __init__(self, key_word='', role_id=0):\n self.key_word = key_word\n self.role_id = role_id\n\n\nclass User(Role):\n \"\"\"\n 用户类\n 玩家\n \"\"\"\n\n def __init__(self, id, role_id):\n self.id = id\n self.role_id = role_id\n",
"step-4": "import random\n\n\nclass Role:\n \"\"\"\n 角色类\n 卧底\n 平民\n \"\"\"\n\n def __init__(self, key_word='', role_id=0):\n self.key_word = key_word\n self.role_id = role_id\n\n\nclass User(Role):\n \"\"\"\n 用户类\n 玩家\n \"\"\"\n\n def __init__(self, id, role_id):\n self.id = id\n self.role_id = role_id\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport random\n\n\nclass Role:\n\n \"\"\"\n 角色类\n 卧底\n 平民\n \"\"\"\n def __init__(self,key_word=\"\",role_id = 0):\n self.key_word = key_word\n self.role_id = role_id #平民-0;卧底-1;\n\nclass User(Role):\n \"\"\"\n 用户类\n 玩家\n \"\"\"\n def __init__(self,id,role_id):\n self.id = id #玩家id\n self.role_id = role_id\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
class TrieNode:
def __init__(self):
self.children: Dict[str, TrieNode] = collections.defaultdict(TrieNode)
self.word: Optional[str] = None
class Solution:
def findWords(self, board: List[List[str]], words: List[str]) ->List[str]:
m = len(board)
n = len(board[0])
ans = []
root = TrieNode()
def insert(word: str) ->None:
node = root
for c in word:
if c not in node.children:
node.children[c] = TrieNode()
node = node.children[c]
node.word = word
for word in words:
insert(word)
def dfs(i: int, j: int, node: TrieNode) ->None:
if i < 0 or i == m or j < 0 or j == n:
return
if board[i][j] == '*':
return
c = board[i][j]
if c not in node.children:
return
child = node.children[c]
if child.word:
ans.append(child.word)
child.word = None
board[i][j] = '*'
dfs(i + 1, j, child)
dfs(i - 1, j, child)
dfs(i, j + 1, child)
dfs(i, j - 1, child)
board[i][j] = c
for i in range(m):
for j in range(n):
dfs(i, j, root)
return ans
|
normal
|
{
"blob_id": "f996dffcb9650663278ec1e31d9f88d50142f4ea",
"index": 4491,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def findWords(self, board: List[List[str]], words: List[str]) ->List[str]:\n m = len(board)\n n = len(board[0])\n ans = []\n root = TrieNode()\n\n def insert(word: str) ->None:\n node = root\n for c in word:\n if c not in node.children:\n node.children[c] = TrieNode()\n node = node.children[c]\n node.word = word\n for word in words:\n insert(word)\n\n def dfs(i: int, j: int, node: TrieNode) ->None:\n if i < 0 or i == m or j < 0 or j == n:\n return\n if board[i][j] == '*':\n return\n c = board[i][j]\n if c not in node.children:\n return\n child = node.children[c]\n if child.word:\n ans.append(child.word)\n child.word = None\n board[i][j] = '*'\n dfs(i + 1, j, child)\n dfs(i - 1, j, child)\n dfs(i, j + 1, child)\n dfs(i, j - 1, child)\n board[i][j] = c\n for i in range(m):\n for j in range(n):\n dfs(i, j, root)\n return ans\n",
"step-3": "class TrieNode:\n <mask token>\n\n\nclass Solution:\n\n def findWords(self, board: List[List[str]], words: List[str]) ->List[str]:\n m = len(board)\n n = len(board[0])\n ans = []\n root = TrieNode()\n\n def insert(word: str) ->None:\n node = root\n for c in word:\n if c not in node.children:\n node.children[c] = TrieNode()\n node = node.children[c]\n node.word = word\n for word in words:\n insert(word)\n\n def dfs(i: int, j: int, node: TrieNode) ->None:\n if i < 0 or i == m or j < 0 or j == n:\n return\n if board[i][j] == '*':\n return\n c = board[i][j]\n if c not in node.children:\n return\n child = node.children[c]\n if child.word:\n ans.append(child.word)\n child.word = None\n board[i][j] = '*'\n dfs(i + 1, j, child)\n dfs(i - 1, j, child)\n dfs(i, j + 1, child)\n dfs(i, j - 1, child)\n board[i][j] = c\n for i in range(m):\n for j in range(n):\n dfs(i, j, root)\n return ans\n",
"step-4": "class TrieNode:\n\n def __init__(self):\n self.children: Dict[str, TrieNode] = collections.defaultdict(TrieNode)\n self.word: Optional[str] = None\n\n\nclass Solution:\n\n def findWords(self, board: List[List[str]], words: List[str]) ->List[str]:\n m = len(board)\n n = len(board[0])\n ans = []\n root = TrieNode()\n\n def insert(word: str) ->None:\n node = root\n for c in word:\n if c not in node.children:\n node.children[c] = TrieNode()\n node = node.children[c]\n node.word = word\n for word in words:\n insert(word)\n\n def dfs(i: int, j: int, node: TrieNode) ->None:\n if i < 0 or i == m or j < 0 or j == n:\n return\n if board[i][j] == '*':\n return\n c = board[i][j]\n if c not in node.children:\n return\n child = node.children[c]\n if child.word:\n ans.append(child.word)\n child.word = None\n board[i][j] = '*'\n dfs(i + 1, j, child)\n dfs(i - 1, j, child)\n dfs(i, j + 1, child)\n dfs(i, j - 1, child)\n board[i][j] = c\n for i in range(m):\n for j in range(n):\n dfs(i, j, root)\n return ans\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
# coding: utf-8
import numpy as np
def sparse(n, k):
u"""
return k sparse vector,
the value of non-zero entries are
normal distributed N(0,1).
[args]
n: size of vector
k: number of nonzero entries
[return]
k-sparse vector
"""
z = np.zeros(n)
for i in np.random.choice( np.arange(n), k, replace=None ): # supports of nonzero entries
z[i] = np.random.randn()
return z
def compressible(n, k, e=0.1):
u"""
perform IHT
[args]
n: size of vector
k: number of nonzero entries
e: noise factor (x e)
[return]
k-compressible vector
"""
z = sparse(n, k) + e * np.random.randn(n)
return z
if __name__ == '__main__':
s = 2
print "%s-sparse vector:" % s
print sparse(10, s)
print compressible(10, s, 0.1)
|
normal
|
{
"blob_id": "f0e4cd13571728d61566c4093586c91323629e0b",
"index": 7624,
"step-1": "# coding: utf-8\nimport numpy as np\n\n\n\ndef sparse(n, k):\n u\"\"\"\n return k sparse vector, \n the value of non-zero entries are \n normal distributed N(0,1).\n [args]\n n: size of vector\n k: number of nonzero entries\n [return]\n k-sparse vector\n \"\"\"\n z = np.zeros(n)\n for i in np.random.choice( np.arange(n), k, replace=None ): # supports of nonzero entries\n z[i] = np.random.randn()\n return z\n\n\n\n\ndef compressible(n, k, e=0.1):\n u\"\"\"\n perform IHT \n [args]\n n: size of vector\n k: number of nonzero entries\n e: noise factor (x e)\n [return]\n k-compressible vector\n \"\"\"\n z = sparse(n, k) + e * np.random.randn(n)\n return z\n\n\n\n\n\nif __name__ == '__main__':\n\n\n s = 2\n print \"%s-sparse vector:\" % s\n print sparse(10, s)\n print compressible(10, s, 0.1)\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class tenDParameters:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class tenDParameters:
def __init__(self, b: float, DM: float, pm_l: float, pm_b: float, vrad:
float, sb: float, spml: float, spmb: float, sdm: float, vc: float):
self.b = b
self.DM = DM
self.pm_l = pm_l
self.pm_b = pm_b
self.vrad = vrad
self.sb = sb
self.spml = spml
self.spmb = spmb
self.sdm = sdm
self.vc = vc
<|reserved_special_token_1|>
class tenDParameters:
def __init__(self,
b: float,
DM: float,
pm_l: float,
pm_b: float,
vrad: float,
sb: float,
spml: float,
spmb: float,
sdm: float,
vc: float):
self.b = b
self.DM = DM
# this is actually pm_l * cos b, apparently
self.pm_l = pm_l
self.pm_b = pm_b
self.vrad = vrad
self.sb = sb
self.spml = spml
self.spmb = spmb
self.sdm = sdm
self.vc = vc
|
flexible
|
{
"blob_id": "82e7e22293551e061dcb295c52714c22df0ed0ce",
"index": 5678,
"step-1": "<mask token>\n",
"step-2": "class tenDParameters:\n <mask token>\n",
"step-3": "class tenDParameters:\n\n def __init__(self, b: float, DM: float, pm_l: float, pm_b: float, vrad:\n float, sb: float, spml: float, spmb: float, sdm: float, vc: float):\n self.b = b\n self.DM = DM\n self.pm_l = pm_l\n self.pm_b = pm_b\n self.vrad = vrad\n self.sb = sb\n self.spml = spml\n self.spmb = spmb\n self.sdm = sdm\n self.vc = vc\n",
"step-4": "class tenDParameters:\n def __init__(self,\n b: float,\n DM: float,\n pm_l: float,\n pm_b: float,\n vrad: float,\n sb: float,\n spml: float,\n spmb: float,\n sdm: float,\n vc: float):\n self.b = b\n self.DM = DM\n # this is actually pm_l * cos b, apparently\n self.pm_l = pm_l\n self.pm_b = pm_b\n self.vrad = vrad\n self.sb = sb\n self.spml = spml\n self.spmb = spmb\n self.sdm = sdm\n self.vc = vc",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def limitZ(Z, limit=10):
for i in range(len(Z)):
for j in range(len(Z[i])):
if Z[i][j] > limit:
Z[i][j] = np.inf
if Z[i][j] < -limit:
Z[i][j] = -np.inf
def plotPontos3D(X, Y, Z):
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(X, Y, Z, marker='o')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def plot3D(X, Y, Z, proporcao=1, espelharZ=False):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlabel('X ')
ax.set_ylabel('Y ')
ax.set_zlabel('Z ')
np.floor
colortuple = colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88')
colorsArray = np.empty([len(X), len(Y)], dtype=tuple)
for y in range(len(Y)):
for x in range(len(X)):
colorsArray[x, y] = colortuple[int(np.ceil(x / proporcao) + np.
ceil(y / proporcao)) % len(colortuple)]
surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)
if espelharZ:
surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)
def limitZ(Z, limit=10):
for i in range(len(Z)):
for j in range(len(Z[i])):
if Z[i][j] > limit:
Z[i][j] = np.inf
if Z[i][j] < -limit:
Z[i][j] = -np.inf
def plotPontos3D(X, Y, Z):
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(X, Y, Z, marker='o')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def plot3D(X, Y, Z, proporcao=1, espelharZ=False):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlabel('X ')
ax.set_ylabel('Y ')
ax.set_zlabel('Z ')
np.floor
colortuple = colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88')
colorsArray = np.empty([len(X), len(Y)], dtype=tuple)
for y in range(len(Y)):
for x in range(len(X)):
colorsArray[x, y] = colortuple[int(np.ceil(x / proporcao) + np.
ceil(y / proporcao)) % len(colortuple)]
surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)
if espelharZ:
surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)
def limitZ(Z, limit=10):
for i in range(len(Z)):
for j in range(len(Z[i])):
if Z[i][j] > limit:
Z[i][j] = np.inf
if Z[i][j] < -limit:
Z[i][j] = -np.inf
def plotPontos3D(X, Y, Z):
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(X, Y, Z, marker='o')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
def curvaNivel(X, Y, Z, levels):
fig = plt.figure()
ax = fig.add_subplot()
curva = ax.contourf(X, Y, Z, levels)
ax.set_xlabel('X')
ax.set_ylabel('Y')
fig.colorbar(curva)
plt.show()
<|reserved_special_token_1|>
from typing import Sequence
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
def plot3D(X, Y, Z, proporcao=1, espelharZ=False):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlabel('X ')
ax.set_ylabel('Y ')
ax.set_zlabel('Z ')
np.floor
colortuple = colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88')
colorsArray = np.empty([len(X), len(Y)], dtype=tuple)
for y in range(len(Y)):
for x in range(len(X)):
colorsArray[x, y] = colortuple[int(np.ceil(x / proporcao) + np.
ceil(y / proporcao)) % len(colortuple)]
surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)
if espelharZ:
surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)
def limitZ(Z, limit=10):
for i in range(len(Z)):
for j in range(len(Z[i])):
if Z[i][j] > limit:
Z[i][j] = np.inf
if Z[i][j] < -limit:
Z[i][j] = -np.inf
def plotPontos3D(X, Y, Z):
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(X, Y, Z, marker='o')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
def curvaNivel(X, Y, Z, levels):
fig = plt.figure()
ax = fig.add_subplot()
curva = ax.contourf(X, Y, Z, levels)
ax.set_xlabel('X')
ax.set_ylabel('Y')
fig.colorbar(curva)
plt.show()
<|reserved_special_token_1|>
from typing import Sequence
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
def plot3D(X, Y, Z, proporcao=1, espelharZ = False):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlabel('X ')
ax.set_ylabel('Y ')
ax.set_zlabel('Z ')
np.floor
colortuple = (colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88'))
colorsArray = np.empty([len(X), len(Y)], dtype=tuple)
for y in range(len(Y)):
for x in range(len(X)):
colorsArray[x, y] = colortuple[int(
np.ceil(x/proporcao) + np.ceil(y/proporcao)) % len(colortuple)]
surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)
if(espelharZ):
surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)
#surf = ax.plot_wireframe(X, Y, Z, linewidth=1)
#plt.show()
def limitZ(Z, limit = 10):
for i in range(len(Z)):
for j in range(len(Z[i])):
if(Z[i][j]>limit):
Z[i][j] = np.inf
if(Z[i][j]<-limit):
Z[i][j] = -np.inf
def plotPontos3D(X,Y,Z):
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(X, Y, Z, marker='o')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
def curvaNivel(X,Y,Z,levels):
fig = plt.figure()
ax = fig.add_subplot()
curva = ax.contourf(X,Y,Z,levels)
ax.set_xlabel('X')
ax.set_ylabel('Y')
#curva.cmap.set_under('white')
#curva.cmap.set_over('cyan')
fig.colorbar(curva)
plt.show()
|
flexible
|
{
"blob_id": "ff20b65f35614415ad786602c0fc2cabd08124fb",
"index": 4065,
"step-1": "<mask token>\n\n\ndef limitZ(Z, limit=10):\n for i in range(len(Z)):\n for j in range(len(Z[i])):\n if Z[i][j] > limit:\n Z[i][j] = np.inf\n if Z[i][j] < -limit:\n Z[i][j] = -np.inf\n\n\ndef plotPontos3D(X, Y, Z):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(X, Y, Z, marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot3D(X, Y, Z, proporcao=1, espelharZ=False):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.set_xlabel('X ')\n ax.set_ylabel('Y ')\n ax.set_zlabel('Z ')\n np.floor\n colortuple = colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88')\n colorsArray = np.empty([len(X), len(Y)], dtype=tuple)\n for y in range(len(Y)):\n for x in range(len(X)):\n colorsArray[x, y] = colortuple[int(np.ceil(x / proporcao) + np.\n ceil(y / proporcao)) % len(colortuple)]\n surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)\n if espelharZ:\n surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)\n\n\ndef limitZ(Z, limit=10):\n for i in range(len(Z)):\n for j in range(len(Z[i])):\n if Z[i][j] > limit:\n Z[i][j] = np.inf\n if Z[i][j] < -limit:\n Z[i][j] = -np.inf\n\n\ndef plotPontos3D(X, Y, Z):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(X, Y, Z, marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef plot3D(X, Y, Z, proporcao=1, espelharZ=False):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.set_xlabel('X ')\n ax.set_ylabel('Y ')\n ax.set_zlabel('Z ')\n np.floor\n colortuple = colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88')\n colorsArray = np.empty([len(X), len(Y)], dtype=tuple)\n for y in range(len(Y)):\n for x in range(len(X)):\n colorsArray[x, y] = colortuple[int(np.ceil(x / proporcao) + np.\n ceil(y / proporcao)) % len(colortuple)]\n surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)\n if espelharZ:\n surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)\n\n\ndef limitZ(Z, limit=10):\n for i in range(len(Z)):\n for j in range(len(Z[i])):\n if Z[i][j] > limit:\n Z[i][j] = np.inf\n if Z[i][j] < -limit:\n Z[i][j] = -np.inf\n\n\ndef plotPontos3D(X, Y, Z):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(X, Y, Z, marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()\n\n\ndef curvaNivel(X, Y, Z, levels):\n fig = plt.figure()\n ax = fig.add_subplot()\n curva = ax.contourf(X, Y, Z, levels)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig.colorbar(curva)\n plt.show()\n",
"step-4": "from typing import Sequence\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport numpy as np\n\n\ndef plot3D(X, Y, Z, proporcao=1, espelharZ=False):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.set_xlabel('X ')\n ax.set_ylabel('Y ')\n ax.set_zlabel('Z ')\n np.floor\n colortuple = colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88')\n colorsArray = np.empty([len(X), len(Y)], dtype=tuple)\n for y in range(len(Y)):\n for x in range(len(X)):\n colorsArray[x, y] = colortuple[int(np.ceil(x / proporcao) + np.\n ceil(y / proporcao)) % len(colortuple)]\n surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)\n if espelharZ:\n surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)\n\n\ndef limitZ(Z, limit=10):\n for i in range(len(Z)):\n for j in range(len(Z[i])):\n if Z[i][j] > limit:\n Z[i][j] = np.inf\n if Z[i][j] < -limit:\n Z[i][j] = -np.inf\n\n\ndef plotPontos3D(X, Y, Z):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(X, Y, Z, marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()\n\n\ndef curvaNivel(X, Y, Z, levels):\n fig = plt.figure()\n ax = fig.add_subplot()\n curva = ax.contourf(X, Y, Z, levels)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig.colorbar(curva)\n plt.show()\n",
"step-5": "from typing import Sequence\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport numpy as np\n\n\ndef plot3D(X, Y, Z, proporcao=1, espelharZ = False):\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n ax.set_xlabel('X ')\n ax.set_ylabel('Y ')\n ax.set_zlabel('Z ')\n np.floor\n colortuple = (colors.to_rgba('#FFFF4488'), colors.to_rgb('#4444FF88'))\n colorsArray = np.empty([len(X), len(Y)], dtype=tuple)\n for y in range(len(Y)):\n for x in range(len(X)):\n colorsArray[x, y] = colortuple[int(\n np.ceil(x/proporcao) + np.ceil(y/proporcao)) % len(colortuple)]\n\n surf = ax.plot_surface(X, Y, Z, facecolors=colorsArray, linewidth=0)\n if(espelharZ):\n surf = ax.plot_surface(X, Y, -Z, facecolors=colorsArray, linewidth=0)\n #surf = ax.plot_wireframe(X, Y, Z, linewidth=1)\n\n #plt.show()\n\ndef limitZ(Z, limit = 10):\n for i in range(len(Z)):\n for j in range(len(Z[i])):\n if(Z[i][j]>limit):\n Z[i][j] = np.inf\n if(Z[i][j]<-limit):\n Z[i][j] = -np.inf\n\n\ndef plotPontos3D(X,Y,Z):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(X, Y, Z, marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n\n plt.show()\n\n\ndef curvaNivel(X,Y,Z,levels):\n fig = plt.figure()\n ax = fig.add_subplot()\n curva = ax.contourf(X,Y,Z,levels)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n #curva.cmap.set_under('white')\n #curva.cmap.set_over('cyan')\n fig.colorbar(curva)\n plt.show()\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def test_injection(fc):
from pykern import pkcompat, pkunit
from pykern.pkdebug import pkdc, pkdp, pkdlog
from pykern.pkunit import pkeq, pkok, pkre
import re
r = fc.get('myapp')
pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),
'Unexpected injection of googletag data={}', r.data)
r = fc.get('/en/landing.html')
pkre(_TEST_ID, pkcompat.from_bytes(r.data))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def setup_module(module):
os.environ.update(SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID)
def test_injection(fc):
from pykern import pkcompat, pkunit
from pykern.pkdebug import pkdc, pkdp, pkdlog
from pykern.pkunit import pkeq, pkok, pkre
import re
r = fc.get('myapp')
pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),
'Unexpected injection of googletag data={}', r.data)
r = fc.get('/en/landing.html')
pkre(_TEST_ID, pkcompat.from_bytes(r.data))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_TEST_ID = '__NO_SUCH_STRING_IN_PAGE__'
def setup_module(module):
os.environ.update(SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID)
def test_injection(fc):
from pykern import pkcompat, pkunit
from pykern.pkdebug import pkdc, pkdp, pkdlog
from pykern.pkunit import pkeq, pkok, pkre
import re
r = fc.get('myapp')
pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),
'Unexpected injection of googletag data={}', r.data)
r = fc.get('/en/landing.html')
pkre(_TEST_ID, pkcompat.from_bytes(r.data))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import absolute_import, division, print_function
import pytest
import os
_TEST_ID = '__NO_SUCH_STRING_IN_PAGE__'
def setup_module(module):
os.environ.update(SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID)
def test_injection(fc):
from pykern import pkcompat, pkunit
from pykern.pkdebug import pkdc, pkdp, pkdlog
from pykern.pkunit import pkeq, pkok, pkre
import re
r = fc.get('myapp')
pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),
'Unexpected injection of googletag data={}', r.data)
r = fc.get('/en/landing.html')
pkre(_TEST_ID, pkcompat.from_bytes(r.data))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""Test(s) for static files
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
import os
_TEST_ID = '__NO_SUCH_STRING_IN_PAGE__'
def setup_module(module):
os.environ.update(
SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID,
)
def test_injection(fc):
from pykern import pkcompat, pkunit
from pykern.pkdebug import pkdc, pkdp, pkdlog
from pykern.pkunit import pkeq, pkok, pkre
import re
# test non-static page
r = fc.get('myapp')
pkok(
not re.search(
r'googletag',
pkcompat.from_bytes(r.data)
),
'Unexpected injection of googletag data={}',
r.data
)
# test successful injection
r = fc.get('/en/landing.html')
pkre(_TEST_ID, pkcompat.from_bytes(r.data))
|
flexible
|
{
"blob_id": "65b5db0bc6f23c342138060b7a006ff61e2dcf45",
"index": 3761,
"step-1": "<mask token>\n\n\ndef test_injection(fc):\n from pykern import pkcompat, pkunit\n from pykern.pkdebug import pkdc, pkdp, pkdlog\n from pykern.pkunit import pkeq, pkok, pkre\n import re\n r = fc.get('myapp')\n pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),\n 'Unexpected injection of googletag data={}', r.data)\n r = fc.get('/en/landing.html')\n pkre(_TEST_ID, pkcompat.from_bytes(r.data))\n",
"step-2": "<mask token>\n\n\ndef setup_module(module):\n os.environ.update(SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID)\n\n\ndef test_injection(fc):\n from pykern import pkcompat, pkunit\n from pykern.pkdebug import pkdc, pkdp, pkdlog\n from pykern.pkunit import pkeq, pkok, pkre\n import re\n r = fc.get('myapp')\n pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),\n 'Unexpected injection of googletag data={}', r.data)\n r = fc.get('/en/landing.html')\n pkre(_TEST_ID, pkcompat.from_bytes(r.data))\n",
"step-3": "<mask token>\n_TEST_ID = '__NO_SUCH_STRING_IN_PAGE__'\n\n\ndef setup_module(module):\n os.environ.update(SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID)\n\n\ndef test_injection(fc):\n from pykern import pkcompat, pkunit\n from pykern.pkdebug import pkdc, pkdp, pkdlog\n from pykern.pkunit import pkeq, pkok, pkre\n import re\n r = fc.get('myapp')\n pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),\n 'Unexpected injection of googletag data={}', r.data)\n r = fc.get('/en/landing.html')\n pkre(_TEST_ID, pkcompat.from_bytes(r.data))\n",
"step-4": "<mask token>\nfrom __future__ import absolute_import, division, print_function\nimport pytest\nimport os\n_TEST_ID = '__NO_SUCH_STRING_IN_PAGE__'\n\n\ndef setup_module(module):\n os.environ.update(SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID)\n\n\ndef test_injection(fc):\n from pykern import pkcompat, pkunit\n from pykern.pkdebug import pkdc, pkdp, pkdlog\n from pykern.pkunit import pkeq, pkok, pkre\n import re\n r = fc.get('myapp')\n pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),\n 'Unexpected injection of googletag data={}', r.data)\n r = fc.get('/en/landing.html')\n pkre(_TEST_ID, pkcompat.from_bytes(r.data))\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Test(s) for static files\n\n:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.\n:license: http://www.apache.org/licenses/LICENSE-2.0.html\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport pytest\nimport os\n\n_TEST_ID = '__NO_SUCH_STRING_IN_PAGE__'\n\n\ndef setup_module(module):\n os.environ.update(\n SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID,\n )\n\n\ndef test_injection(fc):\n from pykern import pkcompat, pkunit\n from pykern.pkdebug import pkdc, pkdp, pkdlog\n from pykern.pkunit import pkeq, pkok, pkre\n import re\n\n # test non-static page\n r = fc.get('myapp')\n pkok(\n not re.search(\n r'googletag',\n pkcompat.from_bytes(r.data)\n ),\n 'Unexpected injection of googletag data={}',\n r.data\n )\n\n # test successful injection\n r = fc.get('/en/landing.html')\n pkre(_TEST_ID, pkcompat.from_bytes(r.data))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
This module contains the logic to resolve the head-tail orientation of a predicted video time series.
"""
import logging
import numpy as np
import numpy.ma as ma
from wormpose.pose.distance_metrics import angle_distance, skeleton_distance
from wormpose.pose.results_datatypes import (
BaseResults,
ShuffledResults,
OriginalResults,
)
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# threshold to compare neighbor frames theta, to be considered continuous and belong to the same segment
CONTINUOUS_ANGLES_DIST_THRESHOLD = np.deg2rad(30)
# we consider frames to be part of the same segment if they are maximum this amount of seconds apart
# (and satisfy the distance threshold)
CONTINOUS_SEGMENT_TIME_WINDOW_SEC = 0.2
# discard too small segments less than this amount of seconds
MIN_SEGMENT_SIZE_SEC = 0.2
# don't align isolated segments that are more than this amount of seconds apart from aligned segments
MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC = 1
def _init_partitioned_series(shuffled_series: np.ndarray):
return ma.masked_all_like(shuffled_series)
def _set_partition(partitioned_series, shuffled_series, frame_index: int, partition: int):
partitioned_series[frame_index][0] = shuffled_series[frame_index, partition]
partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 - partition]
class _PartitionedResults(BaseResults):
def __init__(self, shuffled_results: ShuffledResults):
self.cur_partition = -1
self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)
self._shuffled_results = shuffled_results
theta = _init_partitioned_series(shuffled_results.theta)
skeletons = _init_partitioned_series(shuffled_results.skeletons)
scores = _init_partitioned_series(shuffled_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
self.partitions.mask[indices] = True
def set_partition(self, frame_index: int, partition: int, new_partition: bool = False):
if new_partition:
self.cur_partition += 1
_set_partition(self.theta, self._shuffled_results.theta, frame_index, partition)
_set_partition(self.skeletons, self._shuffled_results.skeletons, frame_index, partition)
_set_partition(self.scores, self._shuffled_results.scores, frame_index, partition)
self.partitions[frame_index] = self.cur_partition
def _get_partition_indices(self, partition_index: int):
return np.where(self.partitions == partition_index)[0]
def get_segments(self):
all_partitions_indexes = np.unique(self.partitions.filled(-1))
return [
self._get_partition_indices(partition_index)
for partition_index in all_partitions_indexes
if partition_index >= 0
]
class _ResolvedResults(BaseResults):
def __init__(self, partitioned_results: _PartitionedResults):
self._partitioned_results = partitioned_results
theta = _init_unified_series(partitioned_results.theta)
skeletons = _init_unified_series(partitioned_results.skeletons)
scores = _init_unified_series(partitioned_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def resolve(self, segment, segment_alignment):
self.scores[segment] = self._partitioned_results.scores[segment][:, segment_alignment]
self.skeletons[segment] = self._partitioned_results.skeletons[segment][:, segment_alignment]
self.theta[segment] = self._partitioned_results.theta[segment][:, segment_alignment]
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
def num_valid(self):
return np.sum(~self.scores.mask)
class _FinalResults(BaseResults):
@classmethod
def from_resolved(cls, resolved_results: _ResolvedResults):
return _FinalResults(
theta=resolved_results.theta.filled(np.nan),
skeletons=resolved_results.skeletons.filled(np.nan),
scores=resolved_results.scores.filled(np.nan),
)
@classmethod
def from_shuffled(cls, shuffled_results: ShuffledResults):
return _FinalResults(
theta=np.full_like(shuffled_results.theta[:, 0], np.nan),
skeletons=np.full_like(shuffled_results.scores[:, 0], np.nan),
scores=np.full_like(shuffled_results.skeletons[:, 0], np.nan),
)
def _make_continuous_partitions(
shuffled_results: ShuffledResults, score_threshold: float, frame_rate: float
) -> _PartitionedResults:
time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))
min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))
partitioned_results = _PartitionedResults(shuffled_results)
# discard low score frames early (use the maximum value of both scores for now)
good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.scores, axis=1), score_threshold))[0]
for frame_index in good_score_frames:
prev_theta = partitioned_results.theta[frame_index - min(time_window, frame_index) : frame_index, 0]
# if there is a big gap > time_window we start a new partition, with a random value (0)
if np.all(np.any(prev_theta.mask, axis=1)):
partitioned_results.set_partition(frame_index=frame_index, partition=0, new_partition=True)
# otherwise we look in the time_window close past the closest non nan frame see if we can continue the
# partition as long as the values stay continuous
else:
last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1]
dists = [
angle_distance(
shuffled_results.theta[frame_index, k, :],
prev_theta[last_valid_index],
)
for k in range(2)
]
partition = int(np.argmin(dists))
if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:
partitioned_results.set_partition(frame_index=frame_index, partition=partition)
# discard short segments
for cur_partition_indices in partitioned_results.get_segments():
if len(cur_partition_indices) < min_segment_size:
partitioned_results.mask(cur_partition_indices)
return partitioned_results
def _align_segments_with_labels(segments, partitioned_skeletons, labelled_skeletons, min_labelled=5):
"""
Match the head/tail alignment with the results of the classical tracking in each of the segments,
if there is enough labelled data in the segment
"""
segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)
for segment_index, segment in enumerate(segments):
segment_skeletons = labelled_skeletons[segment]
non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))
labels_count = np.sum(non_nan_labelled)
non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, 2, 3))
to_compare = np.logical_and(non_nan_labelled, non_masked)
similarity_scores = []
for label_skel, partitioned_skeleton in zip(
segment_skeletons[to_compare], partitioned_skeletons[segment][to_compare]
):
dists = [skeleton_distance(label_skel, x) for x in partitioned_skeleton]
similarity_scores.append(dists)
if len(similarity_scores) > 0:
mean_similarity_scores = np.mean(similarity_scores, axis=0)
if mean_similarity_scores[0] * mean_similarity_scores[1] < 0 and labels_count > min_labelled:
segments_alignment[segment_index] = np.argmax(mean_similarity_scores)
return segments_alignment
def _calculate_smallest_gap_to_adjacent(segment_index, segments, segments_alignment):
# evaluate how far away this segment is from known values
score = np.nan
segment_offset = np.nan
if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1]:
gap = segments[segment_index][0] - segments[segment_index - 1][-1]
score = gap
segment_offset = -1
if segment_index + 1 < len(segments_alignment) and not segments_alignment.mask[segment_index + 1]:
gap = segments[segment_index + 1][0] - segments[segment_index][-1]
if np.isnan(score) or gap < score:
score = gap
segment_offset = 1
return score, segment_offset
def _align_unlabelled_segments_with_adjacents(segments, segments_alignment, partitioned_skeletons, frame_rate: float):
"""
Resolve the unaligned segments by comparing with adjacent segments,
starting with the segments that have the least frames gap between an adjacent trusted segment
Don't align isolated segments which a big gap between trusted segments
"""
maximum_gap_allowed = max(1, int(frame_rate * MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))
# ensure that if no segments have been aligned at all, pick one solution randomly to start
if np.all(segments_alignment.mask):
logger.info("There are no trusted segments with head decision to resolve the whole video, stopping analysis.")
return segments_alignment
# fix in priority the segments with known adjacent frames with little gap
# until all segments are aligned except the isolated ones (further than maximum_gap_allowed)
unaligned = np.where(segments_alignment.mask)[0]
while len(unaligned) > 0:
# we first pick the best candidate segment to align (there are known frames nearby before or after or both)
all_gaps = [
_calculate_smallest_gap_to_adjacent(
segment_index=x,
segments=segments,
segments_alignment=segments_alignment,
)
for x in unaligned
]
segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]
gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[segment_to_fix_index]
# abort if only isolated segments are left
if gap_to_adjacent_segment > maximum_gap_allowed:
break
cur_segment_index = unaligned[segment_to_fix_index]
cur_segment_skeleton = partitioned_skeletons[segments[cur_segment_index]]
adjacent_segment_index = cur_segment_index + adjacent_segment_offset
adjacent_alignment = segments_alignment[adjacent_segment_index]
adjacent_segment = segments[adjacent_segment_index]
adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][:, adjacent_alignment]
if adjacent_segment_offset == -1:
closest_unaligned_skeleton = cur_segment_skeleton[0] # first frame of cur segment
closest_known_skeleton = adjacent_segment_skeleton[-1] # last frame of prev segment
elif adjacent_segment_offset == 1:
closest_unaligned_skeleton = cur_segment_skeleton[-1] # last frame of cur segment
closest_known_skeleton = adjacent_segment_skeleton[0] # first frame of next segment
else:
raise ValueError()
dists = [skeleton_distance(closest_known_skeleton, skel) for skel in closest_unaligned_skeleton]
segments_alignment[cur_segment_index] = int(np.argmax(dists))
unaligned = np.where(segments_alignment.mask)[0]
return segments_alignment
def _init_unified_series(mixed_series):
return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:], dtype=mixed_series.dtype)
def resolve_head_tail(
    shuffled_results: ShuffledResults,
    original_results: OriginalResults,
    frame_rate: float,
    score_threshold,
) -> BaseResults:
    """Resolve the head/tail orientation of a predicted video time series.

    Partitions the shuffled predictions into continuous segments, aligns each
    segment first against the labelled (classical-tracking) data and then
    against already-aligned neighboring segments, masks low-score frames, and
    compiles everything into final results.
    """
    total_frames = len(shuffled_results)
    # Build continuous segments (no orientation jumps) from the predictions
    partitioned_results = _make_continuous_partitions(
        shuffled_results=shuffled_results,
        score_threshold=score_threshold,
        frame_rate=frame_rate,
    )
    segments = partitioned_results.get_segments()
    if not segments:
        logger.error(
            f"Couldn't find any continuous segments of predicted data above the threshold {score_threshold},"
            f" stopping analysis."
        )
        return _FinalResults.from_shuffled(shuffled_results)
    # First pass: decide each segment's global alignment from the labelled data
    segments_alignment = _align_segments_with_labels(
        segments, partitioned_results.skeletons, original_results.skeletons
    )
    # Second pass: resolve leftover segments by comparing with adjacent trusted ones
    segments_alignment = _align_unlabelled_segments_with_adjacents(
        segments, segments_alignment, partitioned_results.skeletons, frame_rate
    )
    # Compile the aligned segments into a unified series
    resolved_results = _ResolvedResults(partitioned_results)
    for segment, alignment in zip(segments, segments_alignment):
        if ma.is_masked(alignment):
            continue
        resolved_results.resolve(segment, alignment)
    # Final filter: drop any resolved frame whose score falls under the threshold
    below_threshold = np.where(ma.masked_less(resolved_results.scores, score_threshold).mask)[0]
    resolved_results.mask(below_threshold)
    num_success = resolved_results.num_valid()
    original_num_success = np.any(~np.isnan(original_results.skeletons), axis=(1, 2)).sum()
    logger.info(
        f"Resolved head/tail, {num_success} out of {total_frames} frames analyzed successfully "
        f"({float(num_success) / total_frames * 100:.1f}%) (original features : {original_num_success}"
        f" or {(float(original_num_success) / total_frames * 100):.1f}% of total)"
    )
    if num_success < original_num_success:
        logger.warning(f"Original results had {original_num_success - num_success} more successfully analyzed frames!")
    return _FinalResults.from_resolved(resolved_results)
|
normal
|
{
"blob_id": "b8fcd8e6dce8d210576bc4166dd258e5fd51278d",
"index": 517,
"step-1": "<mask token>\n\n\nclass _PartitionedResults(BaseResults):\n <mask token>\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n <mask token>\n <mask token>\n <mask token>\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _init_partitioned_series(shuffled_series: np.ndarray):\n return ma.masked_all_like(shuffled_series)\n\n\n<mask token>\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, 
skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<mask token>\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment,\n partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments 
that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate *\n MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n if np.all(segments_alignment.mask):\n logger.info(\n 'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'\n )\n return segments_alignment\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,\n segments=segments, segments_alignment=segments_alignment) for x in\n unaligned]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[\n segment_to_fix_index]\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[\n cur_segment_index]]\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][\n :, adjacent_alignment]\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0]\n closest_known_skeleton = adjacent_segment_skeleton[-1]\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1]\n closest_known_skeleton = adjacent_segment_skeleton[0]\n else:\n raise ValueError()\n dists = [skeleton_distance(closest_known_skeleton, skel) for skel in\n closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n unaligned = np.where(segments_alignment.mask)[0]\n return segments_alignment\n\n\ndef _init_unified_series(mixed_series):\n return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:],\n 
dtype=mixed_series.dtype)\n\n\ndef resolve_head_tail(shuffled_results: ShuffledResults, original_results:\n OriginalResults, frame_rate: float, score_threshold) ->BaseResults:\n len_series = len(shuffled_results)\n partitioned_results = _make_continuous_partitions(score_threshold=\n score_threshold, frame_rate=frame_rate, shuffled_results=\n shuffled_results)\n segments = partitioned_results.get_segments()\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n segments_alignment = _align_segments_with_labels(segments,\n partitioned_results.skeletons, original_results.skeletons)\n segments_alignment = _align_unlabelled_segments_with_adjacents(segments,\n segments_alignment, partitioned_results.skeletons, frame_rate)\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores,\n score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n num_success = resolved_results.num_valid()\n original_num_success = np.any(~np.isnan(original_results.skeletons),\n axis=(1, 2)).sum()\n logger.info(\n f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'\n )\n if num_success < original_num_success:\n logger.warning(\n f'Original results had {original_num_success - num_success} more successfully analyzed frames!'\n )\n return _FinalResults.from_resolved(resolved_results)\n",
"step-3": "<mask token>\n\n\ndef _init_partitioned_series(shuffled_series: np.ndarray):\n return ma.masked_all_like(shuffled_series)\n\n\n<mask token>\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, 
skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<mask token>\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons,\n labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, \n 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(segment_skeletons[\n to_compare], 
partitioned_skeletons[segment][to_compare]):\n dists = [skeleton_distance(label_skel, x) for x in\n partitioned_skeleton]\n similarity_scores.append(dists)\n if len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1\n ] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(\n mean_similarity_scores)\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment,\n partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate *\n MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n if np.all(segments_alignment.mask):\n logger.info(\n 'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'\n )\n return segments_alignment\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,\n segments=segments, segments_alignment=segments_alignment) for x in\n unaligned]\n segment_to_fix_index = np.nanargmin(all_gaps, 
axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[\n segment_to_fix_index]\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[\n cur_segment_index]]\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][\n :, adjacent_alignment]\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0]\n closest_known_skeleton = adjacent_segment_skeleton[-1]\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1]\n closest_known_skeleton = adjacent_segment_skeleton[0]\n else:\n raise ValueError()\n dists = [skeleton_distance(closest_known_skeleton, skel) for skel in\n closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n unaligned = np.where(segments_alignment.mask)[0]\n return segments_alignment\n\n\ndef _init_unified_series(mixed_series):\n return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:],\n dtype=mixed_series.dtype)\n\n\ndef resolve_head_tail(shuffled_results: ShuffledResults, original_results:\n OriginalResults, frame_rate: float, score_threshold) ->BaseResults:\n len_series = len(shuffled_results)\n partitioned_results = _make_continuous_partitions(score_threshold=\n score_threshold, frame_rate=frame_rate, shuffled_results=\n shuffled_results)\n segments = partitioned_results.get_segments()\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n segments_alignment = _align_segments_with_labels(segments,\n partitioned_results.skeletons, 
original_results.skeletons)\n segments_alignment = _align_unlabelled_segments_with_adjacents(segments,\n segments_alignment, partitioned_results.skeletons, frame_rate)\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores,\n score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n num_success = resolved_results.num_valid()\n original_num_success = np.any(~np.isnan(original_results.skeletons),\n axis=(1, 2)).sum()\n logger.info(\n f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'\n )\n if num_success < original_num_success:\n logger.warning(\n f'Original results had {original_num_success - num_success} more successfully analyzed frames!'\n )\n return _FinalResults.from_resolved(resolved_results)\n",
"step-4": "<mask token>\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nCONTINUOUS_ANGLES_DIST_THRESHOLD = np.deg2rad(30)\nCONTINOUS_SEGMENT_TIME_WINDOW_SEC = 0.2\nMIN_SEGMENT_SIZE_SEC = 0.2\nMAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC = 1\n\n\ndef _init_partitioned_series(shuffled_series: np.ndarray):\n return ma.masked_all_like(shuffled_series)\n\n\ndef _set_partition(partitioned_series, shuffled_series, frame_index: int,\n partition: int):\n partitioned_series[frame_index][0] = shuffled_series[frame_index, partition\n ]\n partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 -\n partition]\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = 
np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\ndef _make_continuous_partitions(shuffled_results: ShuffledResults,\n score_threshold: float, frame_rate: float) ->_PartitionedResults:\n time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))\n min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))\n partitioned_results = 
_PartitionedResults(shuffled_results)\n good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.\n scores, axis=1), score_threshold))[0]\n for frame_index in good_score_frames:\n prev_theta = partitioned_results.theta[frame_index - min(\n time_window, frame_index):frame_index, 0]\n if np.all(np.any(prev_theta.mask, axis=1)):\n partitioned_results.set_partition(frame_index=frame_index,\n partition=0, new_partition=True)\n else:\n last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1\n ]\n dists = [angle_distance(shuffled_results.theta[frame_index, k,\n :], prev_theta[last_valid_index]) for k in range(2)]\n partition = int(np.argmin(dists))\n if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:\n partitioned_results.set_partition(frame_index=frame_index,\n partition=partition)\n for cur_partition_indices in partitioned_results.get_segments():\n if len(cur_partition_indices) < min_segment_size:\n partitioned_results.mask(cur_partition_indices)\n return partitioned_results\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons,\n labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, \n 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(segment_skeletons[\n to_compare], partitioned_skeletons[segment][to_compare]):\n dists = [skeleton_distance(label_skel, x) for x in\n partitioned_skeleton]\n similarity_scores.append(dists)\n if 
len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1\n ] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(\n mean_similarity_scores)\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment,\n partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate *\n MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n if np.all(segments_alignment.mask):\n logger.info(\n 'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'\n )\n return segments_alignment\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,\n segments=segments, segments_alignment=segments_alignment) for x in\n unaligned]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[\n segment_to_fix_index]\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n 
cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[\n cur_segment_index]]\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][\n :, adjacent_alignment]\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0]\n closest_known_skeleton = adjacent_segment_skeleton[-1]\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1]\n closest_known_skeleton = adjacent_segment_skeleton[0]\n else:\n raise ValueError()\n dists = [skeleton_distance(closest_known_skeleton, skel) for skel in\n closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n unaligned = np.where(segments_alignment.mask)[0]\n return segments_alignment\n\n\ndef _init_unified_series(mixed_series):\n return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:],\n dtype=mixed_series.dtype)\n\n\ndef resolve_head_tail(shuffled_results: ShuffledResults, original_results:\n OriginalResults, frame_rate: float, score_threshold) ->BaseResults:\n len_series = len(shuffled_results)\n partitioned_results = _make_continuous_partitions(score_threshold=\n score_threshold, frame_rate=frame_rate, shuffled_results=\n shuffled_results)\n segments = partitioned_results.get_segments()\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n segments_alignment = _align_segments_with_labels(segments,\n partitioned_results.skeletons, original_results.skeletons)\n segments_alignment = _align_unlabelled_segments_with_adjacents(segments,\n segments_alignment, partitioned_results.skeletons, 
frame_rate)\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores,\n score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n num_success = resolved_results.num_valid()\n original_num_success = np.any(~np.isnan(original_results.skeletons),\n axis=(1, 2)).sum()\n logger.info(\n f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'\n )\n if num_success < original_num_success:\n logger.warning(\n f'Original results had {original_num_success - num_success} more successfully analyzed frames!'\n )\n return _FinalResults.from_resolved(resolved_results)\n",
"step-5": "\"\"\"\nThis module contains the logic to resolve the head-tail orientation of a predicted video time series.\n\"\"\"\n\nimport logging\n\nimport numpy as np\nimport numpy.ma as ma\n\nfrom wormpose.pose.distance_metrics import angle_distance, skeleton_distance\nfrom wormpose.pose.results_datatypes import (\n BaseResults,\n ShuffledResults,\n OriginalResults,\n)\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n# threshold to compare neighbor frames theta, to be considered continuous and belong to the same segment\nCONTINUOUS_ANGLES_DIST_THRESHOLD = np.deg2rad(30)\n\n# we consider frames to be part of the same segment if they are maximum this amount of seconds apart\n# (and satisfy the distance threshold)\nCONTINOUS_SEGMENT_TIME_WINDOW_SEC = 0.2\n\n# discard too small segments less than this amount of seconds\nMIN_SEGMENT_SIZE_SEC = 0.2\n\n# don't align isolated segments that are more than this amount of seconds apart from aligned segments\nMAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC = 1\n\n\ndef _init_partitioned_series(shuffled_series: np.ndarray):\n return ma.masked_all_like(shuffled_series)\n\n\ndef _set_partition(partitioned_series, shuffled_series, frame_index: int, partition: int):\n partitioned_series[frame_index][0] = shuffled_series[frame_index, partition]\n partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 - partition]\n\n\nclass _PartitionedResults(BaseResults):\n def __init__(self, shuffled_results: ShuffledResults):\n\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n 
self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition: bool = False):\n if new_partition:\n self.cur_partition += 1\n\n _set_partition(self.theta, self._shuffled_results.theta, frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons, frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores, frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [\n self._get_partition_indices(partition_index)\n for partition_index in all_partitions_indexes\n if partition_index >= 0\n ]\n\n\nclass _ResolvedResults(BaseResults):\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:, segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][:, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:, segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(\n 
theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan),\n scores=resolved_results.scores.filled(np.nan),\n )\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(\n theta=np.full_like(shuffled_results.theta[:, 0], np.nan),\n skeletons=np.full_like(shuffled_results.scores[:, 0], np.nan),\n scores=np.full_like(shuffled_results.skeletons[:, 0], np.nan),\n )\n\n\ndef _make_continuous_partitions(\n shuffled_results: ShuffledResults, score_threshold: float, frame_rate: float\n) -> _PartitionedResults:\n time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))\n min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))\n\n partitioned_results = _PartitionedResults(shuffled_results)\n\n # discard low score frames early (use the maximum value of both scores for now)\n good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.scores, axis=1), score_threshold))[0]\n\n for frame_index in good_score_frames:\n\n prev_theta = partitioned_results.theta[frame_index - min(time_window, frame_index) : frame_index, 0]\n\n # if there is a big gap > time_window we start a new partition, with a random value (0)\n if np.all(np.any(prev_theta.mask, axis=1)):\n partitioned_results.set_partition(frame_index=frame_index, partition=0, new_partition=True)\n # otherwise we look in the time_window close past the closest non nan frame see if we can continue the\n # partition as long as the values stay continuous\n else:\n last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1]\n dists = [\n angle_distance(\n shuffled_results.theta[frame_index, k, :],\n prev_theta[last_valid_index],\n )\n for k in range(2)\n ]\n partition = int(np.argmin(dists))\n if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:\n partitioned_results.set_partition(frame_index=frame_index, partition=partition)\n\n # discard short segments\n for cur_partition_indices in 
partitioned_results.get_segments():\n if len(cur_partition_indices) < min_segment_size:\n partitioned_results.mask(cur_partition_indices)\n\n return partitioned_results\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons, labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(\n segment_skeletons[to_compare], partitioned_skeletons[segment][to_compare]\n ):\n dists = [skeleton_distance(label_skel, x) for x in partitioned_skeleton]\n similarity_scores.append(dists)\n\n if len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(mean_similarity_scores)\n\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments, segments_alignment):\n # evaluate how far away this segment is from known values\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or 
gap < score:\n score = gap\n segment_offset = 1\n\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment, partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate * MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n # ensure that if no segments have been aligned at all, pick one solution randomly to start\n if np.all(segments_alignment.mask):\n logger.info(\"There are no trusted segments with head decision to resolve the whole video, stopping analysis.\")\n return segments_alignment\n\n # fix in priority the segments with known adjacent frames with little gap\n # until all segments are aligned except the isolated ones (further than maximum_gap_allowed)\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n # we first pick the best candidate segment to align (there are known frames nearby before or after or both)\n all_gaps = [\n _calculate_smallest_gap_to_adjacent(\n segment_index=x,\n segments=segments,\n segments_alignment=segments_alignment,\n )\n for x in unaligned\n ]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[segment_to_fix_index]\n\n # abort if only isolated segments are left\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[cur_segment_index]]\n\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = 
partitioned_skeletons[adjacent_segment][:, adjacent_alignment]\n\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0] # first frame of cur segment\n closest_known_skeleton = adjacent_segment_skeleton[-1] # last frame of prev segment\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1] # last frame of cur segment\n closest_known_skeleton = adjacent_segment_skeleton[0] # first frame of next segment\n else:\n raise ValueError()\n\n dists = [skeleton_distance(closest_known_skeleton, skel) for skel in closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n\n unaligned = np.where(segments_alignment.mask)[0]\n\n return segments_alignment\n\n\ndef _init_unified_series(mixed_series):\n return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:], dtype=mixed_series.dtype)\n\n\ndef resolve_head_tail(\n shuffled_results: ShuffledResults,\n original_results: OriginalResults,\n frame_rate: float,\n score_threshold,\n) -> BaseResults:\n len_series = len(shuffled_results)\n\n # Create continuous segments without jumps\n partitioned_results = _make_continuous_partitions(\n score_threshold=score_threshold,\n frame_rate=frame_rate,\n shuffled_results=shuffled_results,\n )\n segments = partitioned_results.get_segments()\n\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold},\"\n f\" stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n\n # Choose each segment global alignment by comparing with labelled data\n segments_alignment = _align_segments_with_labels(\n segments, partitioned_results.skeletons, original_results.skeletons\n )\n\n # Fix unaligned segments here by comparing skeletons with neighboring segments iteratively\n segments_alignment = _align_unlabelled_segments_with_adjacents(\n segments, segments_alignment, 
partitioned_results.skeletons, frame_rate\n )\n\n # Compile results\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n\n # Filter the final results again by score threshold\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores, score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n\n num_success = resolved_results.num_valid()\n original_num_success = np.any(~np.isnan(original_results.skeletons), axis=(1, 2)).sum()\n logger.info(\n f\"Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully \"\n f\"({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success}\"\n f\" or {(float(original_num_success) / len_series * 100):.1f}% of total)\"\n )\n if num_success < original_num_success:\n logger.warning(f\"Original results had {original_num_success - num_success} more successfully analyzed frames!\")\n\n return _FinalResults.from_resolved(resolved_results)\n",
"step-ids": [
10,
19,
20,
24,
26
]
}
|
[
10,
19,
20,
24,
26
] |
<|reserved_special_token_0|>
class xspecView(object):
<|reserved_special_token_0|>
def LoadSwiftPHAs(self, phaFiles):
"""
Load The Swift PHAs in time order
"""
for pha in phaFiles:
s = xs.Spectrum(pha)
s.ignore('**-15. 150.-**')
cnts = sum(s.values)
self.swift.append(cnts)
def LoadNaiPHAs(self, phaFiles):
"""
Load The GBM NaI PHAs in time order
"""
for pha in phaFiles:
s = xs.Spectrum(pha)
s.ignore('**-8. 1999..-**')
cnts = sum(s.values)
self.nai.append(cnts)
<|reserved_special_token_0|>
def SetTimeBins(self, starts, stops):
self.tBins = array(zip(starts, stops))
def PlotLC(self):
fig = plt.figure(1)
grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=
'column')
Step(grid[0], self.tBins, self.swift, 'r', 1.0)
Step(grid[1], self.tBins, self.nai, 'b', 1.0)
Step(grid[2], self.tBins, self.bgo, 'g', 1.0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class xspecView(object):
<|reserved_special_token_0|>
def LoadSwiftPHAs(self, phaFiles):
"""
Load The Swift PHAs in time order
"""
for pha in phaFiles:
s = xs.Spectrum(pha)
s.ignore('**-15. 150.-**')
cnts = sum(s.values)
self.swift.append(cnts)
def LoadNaiPHAs(self, phaFiles):
"""
Load The GBM NaI PHAs in time order
"""
for pha in phaFiles:
s = xs.Spectrum(pha)
s.ignore('**-8. 1999..-**')
cnts = sum(s.values)
self.nai.append(cnts)
def LoadBGOPHAs(self, phaFiles):
"""
Load The GBM BGO PHAs in time order
"""
for pha in phaFiles:
s = xs.Spectrum(pha)
s.ignore('**-250. 10000.-**')
cnts = sum(s.values)
self.bgo.append(cnts)
def SetTimeBins(self, starts, stops):
self.tBins = array(zip(starts, stops))
def PlotLC(self):
fig = plt.figure(1)
grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=
'column')
Step(grid[0], self.tBins, self.swift, 'r', 1.0)
Step(grid[1], self.tBins, self.nai, 'b', 1.0)
Step(grid[2], self.tBins, self.bgo, 'g', 1.0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class xspecView(object):
    """Builds stacked step light curves from Swift and Fermi/GBM PHA files.

    Relies on file-level names: xs (PyXspec), plt (matplotlib.pyplot),
    Grid (mpl_toolkits.axes_grid1) and Step (spectralTools.step).
    """
    def __init__(self):
        # Tell PyXspec to express the spectral axis in keV.
        xs.Plot.xAxis = 'keV'
        # Per-time-bin summed counts, one list per instrument.
        self.swift = []
        self.nai = []
        self.bgo = []
    def LoadSwiftPHAs(self, phaFiles):
        """
        Load the Swift PHAs (in time order) and append each spectrum's
        total counts in the 15-150 keV band to self.swift.
        """
        for pha in phaFiles:
            s = xs.Spectrum(pha)
            # Ignore channels outside 15-150 keV.
            s.ignore('**-15. 150.-**')
            cnts = sum(s.values)
            self.swift.append(cnts)
    def LoadNaiPHAs(self, phaFiles):
        """
        Load the GBM NaI PHAs (in time order) and append each spectrum's
        total counts in the 8-1999 keV band to self.nai.
        """
        for pha in phaFiles:
            s = xs.Spectrum(pha)
            # Ignore channels outside 8-1999 keV.
            s.ignore('**-8. 1999..-**')
            cnts = sum(s.values)
            self.nai.append(cnts)
    def LoadBGOPHAs(self, phaFiles):
        """
        Load the GBM BGO PHAs (in time order) and append each spectrum's
        total counts in the 250-10000 keV band to self.bgo.
        """
        for pha in phaFiles:
            s = xs.Spectrum(pha)
            # Ignore channels outside 250-10000 keV.
            s.ignore('**-250. 10000.-**')
            cnts = sum(s.values)
            self.bgo.append(cnts)
    def SetTimeBins(self, starts, stops):
        # NOTE(review): Python 2 idiom -- on Python 3 zip() is lazy, so
        # array(zip(...)) yields a 0-d object array instead of (N, 2).
        self.tBins = array(zip(starts, stops))
    def PlotLC(self):
        # Three stacked panels sharing the time axis:
        # Swift (red), NaI (blue), BGO (green).
        fig = plt.figure(1)
        grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=
            'column')
        Step(grid[0], self.tBins, self.swift, 'r', 1.0)
        Step(grid[1], self.tBins, self.nai, 'b', 1.0)
        Step(grid[2], self.tBins, self.bgo, 'g', 1.0)
<|reserved_special_token_1|>
from numpy import array
import xspec as xs
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import Grid
from spectralTools.step import Step
class xspecView(object):
    """Builds stacked step light curves from Swift and Fermi/GBM PHA files.

    Each Load* method sums the counts of every supplied spectrum over a
    fixed, instrument-specific energy band; PlotLC() then draws the three
    series as step plots sharing a common time axis.

    Relies on file-level names: xs (PyXspec), plt (matplotlib.pyplot),
    Grid (mpl_toolkits.axes_grid1), Step (spectralTools.step) and
    numpy's array.
    """

    def __init__(self):
        # Tell PyXspec to express the spectral axis in keV.
        xs.Plot.xAxis = 'keV'
        self.swift = []  # summed counts per time bin, Swift band
        self.nai = []    # summed counts per time bin, GBM NaI band
        self.bgo = []    # summed counts per time bin, GBM BGO band

    def LoadSwiftPHAs(self, phaFiles):
        """Load the Swift PHAs (in time order) and append each spectrum's
        total counts in the 15-150 keV band to self.swift."""
        for pha in phaFiles:
            s = xs.Spectrum(pha)
            s.ignore('**-15. 150.-**')  # keep only 15-150 keV
            cnts = sum(s.values)
            self.swift.append(cnts)

    def LoadNaiPHAs(self, phaFiles):
        """Load the GBM NaI PHAs (in time order) and append each spectrum's
        total counts in the 8-1999 keV band to self.nai."""
        for pha in phaFiles:
            s = xs.Spectrum(pha)
            s.ignore('**-8. 1999..-**')  # keep only 8-1999 keV
            cnts = sum(s.values)
            self.nai.append(cnts)

    def LoadBGOPHAs(self, phaFiles):
        """Load the GBM BGO PHAs (in time order) and append each spectrum's
        total counts in the 250-10000 keV band to self.bgo."""
        for pha in phaFiles:
            s = xs.Spectrum(pha)
            s.ignore('**-250. 10000.-**')  # keep only 250-10000 keV
            cnts = sum(s.values)
            self.bgo.append(cnts)

    def SetTimeBins(self, starts, stops):
        """Store the time bins as an (N, 2) array of (start, stop) pairs.

        Bug fix: zip() is lazy on Python 3 and numpy turns a bare zip
        iterator into a 0-d object array; materializing it with list()
        restores the intended (N, 2) shape (and is a no-op on Python 2).
        """
        self.tBins = array(list(zip(starts, stops)))

    def PlotLC(self):
        """Draw Swift (red), NaI (blue) and BGO (green) light curves as
        vertically stacked step plots on a shared time axis."""
        fig = plt.figure(1)
        grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=
            'column')
        Step(grid[0], self.tBins, self.swift, 'r', 1.0)
        Step(grid[1], self.tBins, self.nai, 'b', 1.0)
        Step(grid[2], self.tBins, self.bgo, 'g', 1.0)
<|reserved_special_token_1|>
from numpy import array
import xspec as xs
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import Grid
from spectralTools.step import Step
class xspecView(object):
    """Accumulates per-bin counts from Swift and Fermi/GBM PHA spectra and
    renders them as three stacked step light curves.

    Relies on file-level names: xs (PyXspec), plt (matplotlib.pyplot),
    Grid (mpl_toolkits.axes_grid1), Step (spectralTools.step) and
    numpy's array.
    """

    def __init__(self):
        # Interactive plot device intentionally left disabled:
        # xs.Plot.device = "/xs"
        xs.Plot.xAxis = 'keV'
        self.swift = []
        self.nai = []
        self.bgo = []

    def LoadSwiftPHAs(self, phaFiles):
        """Load the Swift PHAs in time order, appending each spectrum's
        summed counts over 15-150 keV."""
        for pha in phaFiles:
            spectrum = xs.Spectrum(pha)
            spectrum.ignore("**-15. 150.-**")
            self.swift.append(sum(spectrum.values))

    def LoadNaiPHAs(self, phaFiles):
        """Load the GBM NaI PHAs in time order, appending each spectrum's
        summed counts over 8-1999 keV."""
        for pha in phaFiles:
            spectrum = xs.Spectrum(pha)
            spectrum.ignore("**-8. 1999..-**")
            self.nai.append(sum(spectrum.values))

    def LoadBGOPHAs(self, phaFiles):
        """Load the GBM BGO PHAs in time order, appending each spectrum's
        summed counts over 250-10000 keV."""
        for pha in phaFiles:
            spectrum = xs.Spectrum(pha)
            spectrum.ignore("**-250. 10000.-**")
            self.bgo.append(sum(spectrum.values))

    def SetTimeBins(self, starts, stops):
        """Record the (start, stop) time-bin pairs."""
        self.tBins = array(zip(starts, stops))

    def PlotLC(self):
        """Draw Swift (red), NaI (blue) and BGO (green) as stacked panels."""
        fig = plt.figure(1)
        grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0., direction='column')
        Step(grid[0], self.tBins, self.swift, 'r', 1.)
        Step(grid[1], self.tBins, self.nai, 'b', 1.)
        Step(grid[2], self.tBins, self.bgo, 'g', 1.)
|
flexible
|
{
"blob_id": "ba34bae7849ad97f939c1a7cb91461269cd58b64",
"index": 8994,
"step-1": "<mask token>\n\n\nclass xspecView(object):\n <mask token>\n\n def LoadSwiftPHAs(self, phaFiles):\n \"\"\"\n Load The Swift PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-15. 150.-**')\n cnts = sum(s.values)\n self.swift.append(cnts)\n\n def LoadNaiPHAs(self, phaFiles):\n \"\"\"\n Load The GBM NaI PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-8. 1999..-**')\n cnts = sum(s.values)\n self.nai.append(cnts)\n <mask token>\n\n def SetTimeBins(self, starts, stops):\n self.tBins = array(zip(starts, stops))\n\n def PlotLC(self):\n fig = plt.figure(1)\n grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=\n 'column')\n Step(grid[0], self.tBins, self.swift, 'r', 1.0)\n Step(grid[1], self.tBins, self.nai, 'b', 1.0)\n Step(grid[2], self.tBins, self.bgo, 'g', 1.0)\n",
"step-2": "<mask token>\n\n\nclass xspecView(object):\n <mask token>\n\n def LoadSwiftPHAs(self, phaFiles):\n \"\"\"\n Load The Swift PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-15. 150.-**')\n cnts = sum(s.values)\n self.swift.append(cnts)\n\n def LoadNaiPHAs(self, phaFiles):\n \"\"\"\n Load The GBM NaI PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-8. 1999..-**')\n cnts = sum(s.values)\n self.nai.append(cnts)\n\n def LoadBGOPHAs(self, phaFiles):\n \"\"\"\n Load The GBM BGO PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-250. 10000.-**')\n cnts = sum(s.values)\n self.bgo.append(cnts)\n\n def SetTimeBins(self, starts, stops):\n self.tBins = array(zip(starts, stops))\n\n def PlotLC(self):\n fig = plt.figure(1)\n grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=\n 'column')\n Step(grid[0], self.tBins, self.swift, 'r', 1.0)\n Step(grid[1], self.tBins, self.nai, 'b', 1.0)\n Step(grid[2], self.tBins, self.bgo, 'g', 1.0)\n",
"step-3": "<mask token>\n\n\nclass xspecView(object):\n\n def __init__(self):\n xs.Plot.xAxis = 'keV'\n self.swift = []\n self.nai = []\n self.bgo = []\n\n def LoadSwiftPHAs(self, phaFiles):\n \"\"\"\n Load The Swift PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-15. 150.-**')\n cnts = sum(s.values)\n self.swift.append(cnts)\n\n def LoadNaiPHAs(self, phaFiles):\n \"\"\"\n Load The GBM NaI PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-8. 1999..-**')\n cnts = sum(s.values)\n self.nai.append(cnts)\n\n def LoadBGOPHAs(self, phaFiles):\n \"\"\"\n Load The GBM BGO PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-250. 10000.-**')\n cnts = sum(s.values)\n self.bgo.append(cnts)\n\n def SetTimeBins(self, starts, stops):\n self.tBins = array(zip(starts, stops))\n\n def PlotLC(self):\n fig = plt.figure(1)\n grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=\n 'column')\n Step(grid[0], self.tBins, self.swift, 'r', 1.0)\n Step(grid[1], self.tBins, self.nai, 'b', 1.0)\n Step(grid[2], self.tBins, self.bgo, 'g', 1.0)\n",
"step-4": "from numpy import array\nimport xspec as xs\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import Grid\nfrom spectralTools.step import Step\n\n\nclass xspecView(object):\n\n def __init__(self):\n xs.Plot.xAxis = 'keV'\n self.swift = []\n self.nai = []\n self.bgo = []\n\n def LoadSwiftPHAs(self, phaFiles):\n \"\"\"\n Load The Swift PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-15. 150.-**')\n cnts = sum(s.values)\n self.swift.append(cnts)\n\n def LoadNaiPHAs(self, phaFiles):\n \"\"\"\n Load The GBM NaI PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-8. 1999..-**')\n cnts = sum(s.values)\n self.nai.append(cnts)\n\n def LoadBGOPHAs(self, phaFiles):\n \"\"\"\n Load The GBM BGO PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-250. 10000.-**')\n cnts = sum(s.values)\n self.bgo.append(cnts)\n\n def SetTimeBins(self, starts, stops):\n self.tBins = array(zip(starts, stops))\n\n def PlotLC(self):\n fig = plt.figure(1)\n grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=\n 'column')\n Step(grid[0], self.tBins, self.swift, 'r', 1.0)\n Step(grid[1], self.tBins, self.nai, 'b', 1.0)\n Step(grid[2], self.tBins, self.bgo, 'g', 1.0)\n",
"step-5": "from numpy import array\nimport xspec as xs \nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import Grid\nfrom spectralTools.step import Step\n\n\n\nclass xspecView(object):\n\n\n def __init__(self):\n\n #xs.Plot.device=\"/xs\"\n xs.Plot.xAxis='keV'\n\n self.swift = []\n self.nai=[]\n self.bgo=[]\n\n def LoadSwiftPHAs(self,phaFiles):\n '''\n Load The Swift PHAs in time order\n\n '''\n for pha in phaFiles:\n\n s = xs.Spectrum(pha)\n s.ignore(\"**-15. 150.-**\")\n\n cnts = sum(s.values)\n\n\n self.swift.append(cnts)\n\n\n def LoadNaiPHAs(self,phaFiles):\n '''\n Load The GBM NaI PHAs in time order\n\n '''\n for pha in phaFiles:\n\n s = xs.Spectrum(pha)\n s.ignore(\"**-8. 1999..-**\")\n cnts = sum(s.values)\n\n self.nai.append(cnts)\n\n\n def LoadBGOPHAs(self,phaFiles):\n '''\n Load The GBM BGO PHAs in time order\n\n '''\n for pha in phaFiles:\n\n s = xs.Spectrum(pha)\n s.ignore(\"**-250. 10000.-**\")\n cnts = sum(s.values)\n\n self.bgo.append(cnts)\n \n\n\n def SetTimeBins(self,starts,stops):\n\n self.tBins = array(zip(starts,stops))\n\n \n\n def PlotLC(self):\n\n fig = plt.figure(1)\n\n grid = Grid(fig,111,nrows_ncols = (3,1), axes_pad=0.,direction='column')\n \n Step(grid[0],self.tBins,self.swift,'r',1.)\n\n Step(grid[1],self.tBins,self.nai,'b',1.)\n\n Step(grid[2],self.tBins,self.bgo,'g',1.)\n \n\n \n \n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('http://www.pythonchallenge.com/pc/def/ocr.html')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
INPUT_TEXT = string.ascii_lowercase
OUTPUT_TEXT = INPUT_TEXT[2:] + INPUT_TEXT[:2]
TRANSLATION_TABLE = str.maketrans(INPUT_TEXT, OUTPUT_TEXT)
CYPHER_TEXT = (
"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj."
)
print('http://www.pythonchallenge.com/pc/def/ocr.html')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import string
# Caesar-style shift of two: a->c, b->d, ..., y->a, z->b.
INPUT_TEXT = string.ascii_lowercase
OUTPUT_TEXT = INPUT_TEXT[2:] + INPUT_TEXT[:2]
TRANSLATION_TABLE = str.maketrans(INPUT_TEXT, OUTPUT_TEXT)
# Python Challenge level-1 cipher text; translating it (and the page name
# 'map') with TRANSLATION_TABLE leads to the URL printed below.
CYPHER_TEXT = (
    "g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj."
    )
print('http://www.pythonchallenge.com/pc/def/ocr.html')
<|reserved_special_token_1|>
"""
Python Challenge - Level 1 - What about making trans?
"""
import string
#import requests
#res = requests.get('http://www.pythonchallenge.com/pc/def/map.html')
#res.raise_for_status()
#print(res.text)
INPUT_TEXT = string.ascii_lowercase # abcdefghijklmnopqrstuvwxyz
OUTPUT_TEXT = INPUT_TEXT[2:]+INPUT_TEXT[:2] # cdefghijklmnopqrstuvwxyzab
TRANSLATION_TABLE = str.maketrans(INPUT_TEXT, OUTPUT_TEXT)
CYPHER_TEXT = """g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr \
amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw \
rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu \
ynnjw ml rfc spj."""
#print(CYPHER_TEXT.translate(TRANSLATION_TABLE))
# The encrypted text told us to apply the same translation to the url
#print('map'.translate(TRANSLATION_TABLE)) # solution here
# Success, let's print out the next level url
print('http://www.pythonchallenge.com/pc/def/ocr.html')
|
flexible
|
{
"blob_id": "3c03f71ef9de8825ecd7c89208c79f43c9fb7a56",
"index": 9594,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('http://www.pythonchallenge.com/pc/def/ocr.html')\n",
"step-3": "<mask token>\nINPUT_TEXT = string.ascii_lowercase\nOUTPUT_TEXT = INPUT_TEXT[2:] + INPUT_TEXT[:2]\nTRANSLATION_TABLE = str.maketrans(INPUT_TEXT, OUTPUT_TEXT)\nCYPHER_TEXT = (\n \"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj.\"\n )\nprint('http://www.pythonchallenge.com/pc/def/ocr.html')\n",
"step-4": "<mask token>\nimport string\nINPUT_TEXT = string.ascii_lowercase\nOUTPUT_TEXT = INPUT_TEXT[2:] + INPUT_TEXT[:2]\nTRANSLATION_TABLE = str.maketrans(INPUT_TEXT, OUTPUT_TEXT)\nCYPHER_TEXT = (\n \"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj.\"\n )\nprint('http://www.pythonchallenge.com/pc/def/ocr.html')\n",
"step-5": "\"\"\"\nPython Challenge - Level 1 - What about making trans?\n\"\"\"\nimport string\n#import requests\n#res = requests.get('http://www.pythonchallenge.com/pc/def/map.html')\n#res.raise_for_status()\n#print(res.text)\n\nINPUT_TEXT = string.ascii_lowercase # abcdefghijklmnopqrstuvwxyz\nOUTPUT_TEXT = INPUT_TEXT[2:]+INPUT_TEXT[:2] # cdefghijklmnopqrstuvwxyzab\nTRANSLATION_TABLE = str.maketrans(INPUT_TEXT, OUTPUT_TEXT)\nCYPHER_TEXT = \"\"\"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr \\\namknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw \\\nrfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu \\\nynnjw ml rfc spj.\"\"\"\n\n#print(CYPHER_TEXT.translate(TRANSLATION_TABLE))\n\n# The encrypted text told us to apply the same translation to the url\n#print('map'.translate(TRANSLATION_TABLE)) # solution here\n\n# Success, let's print out the next level url\nprint('http://www.pythonchallenge.com/pc/def/ocr.html')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sqlite3
import hashlib
users = []  # NOTE(review): module-level list appears unused in this file; candidate for removal
class UserModel:
    """A row of the 'users' table in the main.db SQLite database.

    Columns: id, name, password (stored as a SHA-256 hex digest), sex,
    birth, phone, email.
    """

    # Class-level counter used to auto-assign ids when the caller does
    # not supply one.
    id = 0

    def __init__(self, name, password, birth, sex, phone, email, id=0):
        if id == 0:
            # Bug fix: the original read the never-updated class attribute
            # (`self.id + 1`), so every auto-assigned user got id 1.
            # Advance the shared counter and take its new value.
            UserModel.id += 1
            self.id = UserModel.id
        else:
            self.id = id
        self.name = name
        self.email = email
        # Store only the SHA-256 digest of the password, never plain text.
        digest = hashlib.sha256()
        digest.update(password.encode('utf-8'))
        self.password = digest.hexdigest()
        self.birth = birth
        self.sex = sex
        self.phone = phone

    def add_user(self):
        """Insert this user into the users table (row id assigned by SQLite)."""
        conn = sqlite3.connect('main.db')
        try:
            cursor = conn.cursor()
            insert_query = ('INSERT INTO users (name, password, sex, birth, phone, email) '
                            'VALUES(?, ?, ?, ?, ?, ?)')
            cursor.execute(insert_query, (self.name, self.password, self.sex,
                                          self.birth, self.phone, self.email))
            conn.commit()
        finally:
            # Close even on error (the original leaked the connection when
            # execute/commit raised).
            conn.close()

    @staticmethod
    def get_user(self, id):
        """Return the user with the given id, or None if it does not exist.

        NOTE(review): declared @staticmethod yet takes `self`; callers must
        pass a placeholder first argument, e.g. UserModel.get_user(None, 3).
        The parameter is kept so existing call sites keep working.
        """
        conn = sqlite3.connect('main.db')
        try:
            cursor = conn.cursor()
            query_one_query = 'SELECT * FROM users WHERE id=?'
            result = cursor.execute(query_one_query, (str(id),)).fetchone()
            if result is None:
                return None
            user = UserModel(id=result[0], name=result[1], password=result[2],
                             sex=result[3], birth=result[4], phone=result[5],
                             email=result[6])
            # Bug fix: the constructor hashes its password argument, which
            # double-hashed the digest read from the database; restore the
            # stored digest as-is.
            user.password = result[2]
            user.id = result[0]
            return user
        finally:
            conn.close()

    @staticmethod
    def delete_user(self, id):
        """Delete the user with the given id (no-op if absent).

        NOTE(review): same vestigial `self` as get_user; signature kept
        for backward compatibility.
        """
        conn = sqlite3.connect('main.db')
        try:
            cursor = conn.cursor()
            delete_query = 'DELETE FROM users WHERE id=?'
            cursor.execute(delete_query, (id,))
            conn.commit()
        finally:
            conn.close()

    def update_user(self):
        """Write this object's current field values over its database row."""
        conn = sqlite3.connect('main.db')
        try:
            cursor = conn.cursor()
            update_query = ('UPDATE users SET name=?, password=?, sex=?, '
                            'birth=?, phone=?, email=? WHERE id=?')
            cursor.execute(update_query, (self.name, self.password, self.sex,
                                          self.birth, self.phone, self.email,
                                          self.id))
            conn.commit()
        finally:
            conn.close()

    @staticmethod
    def get_all_user():
        """Return every row of the users table as a list of UserModel."""
        users = []
        conn = sqlite3.connect('main.db')
        try:
            cursor = conn.cursor()
            query_one_query = 'SELECT * FROM users'
            for item in cursor.execute(query_one_query):
                user = UserModel(id=item[0], name=item[1], password=item[2],
                                 sex=item[3], birth=item[4], phone=item[5],
                                 email=item[6])
                user.password = item[2]  # keep stored digest (see get_user)
                users.append(user)
            return users
        finally:
            conn.close()
if __name__ == "__main__":
    # Quick manual smoke check: dump the repr of every stored user.
    all_users = UserModel.get_all_user()
    print(all_users)
|
normal
|
{
"blob_id": "e675283f14a3d29fba878e7f6d9592130611c2be",
"index": 1469,
"step-1": "<mask token>\n\n\nclass UserModel:\n <mask token>\n\n def __init__(self, name, password, birth, sex, phone, email, id=0):\n if id == 0:\n self.id = self.id + 1\n else:\n self.id = id\n self.name = name\n self.email = email\n s = hashlib.sha256()\n s.update(password.encode('utf-8'))\n self.password = s.hexdigest()\n self.birth = birth\n self.sex = sex\n self.phone = phone\n\n def add_user(self):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n insert_query = (\n 'INSERT INTO users (name, password, sex, birth, phone, email) VALUES(?, ?, ?, ?, ?, ?)'\n )\n cursor.execute(insert_query, (self.name, self.password, self.sex,\n self.birth, self.phone, self.email))\n conn.commit()\n conn.close()\n\n @staticmethod\n def get_user(self, id):\n user = None\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n query_one_query = 'SELECT * FROM users WHERE id=?'\n print(query_one_query)\n result = cursor.execute(query_one_query, (str(id),)).fetchone()\n if result is None:\n return None\n print(result)\n user = UserModel(id=result[0], name=result[1], password=result[2],\n sex=result[3], birth=result[4], phone=result[5], email=result[6])\n user.id = result[0]\n conn.close()\n return user\n\n @staticmethod\n def delete_user(self, id):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n delete_query = 'DELETE FROM users WHERE id=?'\n cursor.execute(delete_query, (id,))\n conn.commit()\n conn.close()\n\n def update_user(self):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n update_query = (\n 'UPDATE users SET name=?, password=?, sex=?, birth=?, phone=?, email=? WHERE id=?'\n )\n cursor.execute(update_query, (self.name, self.password, self.sex,\n self.birth, self.phone, self.email, self.id))\n conn.commit()\n conn.close()\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserModel:\n <mask token>\n\n def __init__(self, name, password, birth, sex, phone, email, id=0):\n if id == 0:\n self.id = self.id + 1\n else:\n self.id = id\n self.name = name\n self.email = email\n s = hashlib.sha256()\n s.update(password.encode('utf-8'))\n self.password = s.hexdigest()\n self.birth = birth\n self.sex = sex\n self.phone = phone\n\n def add_user(self):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n insert_query = (\n 'INSERT INTO users (name, password, sex, birth, phone, email) VALUES(?, ?, ?, ?, ?, ?)'\n )\n cursor.execute(insert_query, (self.name, self.password, self.sex,\n self.birth, self.phone, self.email))\n conn.commit()\n conn.close()\n\n @staticmethod\n def get_user(self, id):\n user = None\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n query_one_query = 'SELECT * FROM users WHERE id=?'\n print(query_one_query)\n result = cursor.execute(query_one_query, (str(id),)).fetchone()\n if result is None:\n return None\n print(result)\n user = UserModel(id=result[0], name=result[1], password=result[2],\n sex=result[3], birth=result[4], phone=result[5], email=result[6])\n user.id = result[0]\n conn.close()\n return user\n\n @staticmethod\n def delete_user(self, id):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n delete_query = 'DELETE FROM users WHERE id=?'\n cursor.execute(delete_query, (id,))\n conn.commit()\n conn.close()\n\n def update_user(self):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n update_query = (\n 'UPDATE users SET name=?, password=?, sex=?, birth=?, phone=?, email=? 
WHERE id=?'\n )\n cursor.execute(update_query, (self.name, self.password, self.sex,\n self.birth, self.phone, self.email, self.id))\n conn.commit()\n conn.close()\n\n @staticmethod\n def get_all_user():\n users = []\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n query_one_query = 'SELECT * FROM users'\n for item in cursor.execute(query_one_query):\n user = UserModel(id=item[0], name=item[1], password=item[2],\n sex=item[3], birth=item[4], phone=item[5], email=item[6])\n users.append(user)\n conn.close()\n return users\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass UserModel:\n id = 0\n\n def __init__(self, name, password, birth, sex, phone, email, id=0):\n if id == 0:\n self.id = self.id + 1\n else:\n self.id = id\n self.name = name\n self.email = email\n s = hashlib.sha256()\n s.update(password.encode('utf-8'))\n self.password = s.hexdigest()\n self.birth = birth\n self.sex = sex\n self.phone = phone\n\n def add_user(self):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n insert_query = (\n 'INSERT INTO users (name, password, sex, birth, phone, email) VALUES(?, ?, ?, ?, ?, ?)'\n )\n cursor.execute(insert_query, (self.name, self.password, self.sex,\n self.birth, self.phone, self.email))\n conn.commit()\n conn.close()\n\n @staticmethod\n def get_user(self, id):\n user = None\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n query_one_query = 'SELECT * FROM users WHERE id=?'\n print(query_one_query)\n result = cursor.execute(query_one_query, (str(id),)).fetchone()\n if result is None:\n return None\n print(result)\n user = UserModel(id=result[0], name=result[1], password=result[2],\n sex=result[3], birth=result[4], phone=result[5], email=result[6])\n user.id = result[0]\n conn.close()\n return user\n\n @staticmethod\n def delete_user(self, id):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n delete_query = 'DELETE FROM users WHERE id=?'\n cursor.execute(delete_query, (id,))\n conn.commit()\n conn.close()\n\n def update_user(self):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n update_query = (\n 'UPDATE users SET name=?, password=?, sex=?, birth=?, phone=?, email=? 
WHERE id=?'\n )\n cursor.execute(update_query, (self.name, self.password, self.sex,\n self.birth, self.phone, self.email, self.id))\n conn.commit()\n conn.close()\n\n @staticmethod\n def get_all_user():\n users = []\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n query_one_query = 'SELECT * FROM users'\n for item in cursor.execute(query_one_query):\n user = UserModel(id=item[0], name=item[1], password=item[2],\n sex=item[3], birth=item[4], phone=item[5], email=item[6])\n users.append(user)\n conn.close()\n return users\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass UserModel:\n id = 0\n\n def __init__(self, name, password, birth, sex, phone, email, id=0):\n if id == 0:\n self.id = self.id + 1\n else:\n self.id = id\n self.name = name\n self.email = email\n s = hashlib.sha256()\n s.update(password.encode('utf-8'))\n self.password = s.hexdigest()\n self.birth = birth\n self.sex = sex\n self.phone = phone\n\n def add_user(self):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n insert_query = (\n 'INSERT INTO users (name, password, sex, birth, phone, email) VALUES(?, ?, ?, ?, ?, ?)'\n )\n cursor.execute(insert_query, (self.name, self.password, self.sex,\n self.birth, self.phone, self.email))\n conn.commit()\n conn.close()\n\n @staticmethod\n def get_user(self, id):\n user = None\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n query_one_query = 'SELECT * FROM users WHERE id=?'\n print(query_one_query)\n result = cursor.execute(query_one_query, (str(id),)).fetchone()\n if result is None:\n return None\n print(result)\n user = UserModel(id=result[0], name=result[1], password=result[2],\n sex=result[3], birth=result[4], phone=result[5], email=result[6])\n user.id = result[0]\n conn.close()\n return user\n\n @staticmethod\n def delete_user(self, id):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n delete_query = 'DELETE FROM users WHERE id=?'\n cursor.execute(delete_query, (id,))\n conn.commit()\n conn.close()\n\n def update_user(self):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n update_query = (\n 'UPDATE users SET name=?, password=?, sex=?, birth=?, phone=?, email=? 
WHERE id=?'\n )\n cursor.execute(update_query, (self.name, self.password, self.sex,\n self.birth, self.phone, self.email, self.id))\n conn.commit()\n conn.close()\n\n @staticmethod\n def get_all_user():\n users = []\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n query_one_query = 'SELECT * FROM users'\n for item in cursor.execute(query_one_query):\n user = UserModel(id=item[0], name=item[1], password=item[2],\n sex=item[3], birth=item[4], phone=item[5], email=item[6])\n users.append(user)\n conn.close()\n return users\n\n\nif __name__ == '__main__':\n print(UserModel.get_all_user())\n",
"step-5": "import sqlite3\nimport hashlib\n\nusers = []\n\nclass UserModel:\n id = 0\n\n def __init__(self, name, password, birth, sex, phone, email, id=0):\n if(id == 0):\n self.id = self.id + 1\n else:\n self.id = id\n self.name = name\n self.email = email\n\n #處理密碼\n s = hashlib.sha256()\n s.update(password.encode('utf-8'))\n self.password = s.hexdigest()\n\n self.birth = birth\n self.sex = sex\n self.phone = phone\n\n def add_user(self):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n insert_query = 'INSERT INTO users (name, password, sex, birth, phone, email) \\\n VALUES(?, ?, ?, ?, ?, ?)'\n cursor.execute(insert_query, (self.name, self.password, self.sex,\n self.birth, self.phone, self.email))\n conn.commit()\n conn.close()\n\n @staticmethod\n def get_user(self, id):\n user = None\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n query_one_query = 'SELECT * FROM users WHERE id=?'\n print(query_one_query)\n result = cursor.execute(query_one_query, (str(id),)).fetchone()\n if result is None:\n return None\n print(result)\n user = UserModel(id=result[0], name=result[1], password=result[2], sex = result[3], \\\n birth=result[4], phone=result[5], email=result[6])\n user.id = result[0]\n conn.close()\n return user\n\n @staticmethod\n def delete_user(self, id):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n delete_query = 'DELETE FROM users WHERE id=?'\n cursor.execute(delete_query, (id,))\n conn.commit()\n conn.close()\n\n def update_user(self):\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n update_query = 'UPDATE users SET name=?, password=?, sex=?, birth=?, phone=?, email=? 
WHERE id=?'\n cursor.execute(update_query, (self.name, self.password, self.sex,\n self.birth, self.phone, self.email, self.id))\n conn.commit()\n conn.close()\n\n @staticmethod\n def get_all_user():\n users = []\n conn = sqlite3.connect('main.db')\n cursor = conn.cursor()\n query_one_query = 'SELECT * FROM users'\n for item in cursor.execute(query_one_query):\n user = UserModel(id=item[0], name=item[1], password=item[2], sex = item[3], \\\n birth=item[4], phone=item[5], email=item[6])\n users.append(user)\n conn.close()\n return users\n\nif __name__ == \"__main__\":\n print(UserModel.get_all_user())",
"step-ids": [
6,
7,
8,
9,
12
]
}
|
[
6,
7,
8,
9,
12
] |
import subprocess
class BaseExecution:
    """Run ``df`` with a caller-supplied flag string and hand the raw
    result to a parser object."""

    def __init__(self, flag, parser):
        # NOTE(review): ``flag`` is interpolated into a shell command in
        # execute(); if it can ever come from untrusted input this is a
        # shell-injection risk -- consider shell=False with an argument
        # list.  Left as-is here to preserve exact behavior.
        self.flag = flag
        self.parser = parser

    def execute(self):
        """Execute ``df <flag>`` and return whatever parser.parse() yields."""
        proc = subprocess.Popen(
            f'df {self.flag}',
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        out, err = proc.communicate()
        rc = proc.returncode
        return self.parser(out, err, rc).parse()
|
normal
|
{
"blob_id": "d8af43d24a2f2b99bc8b5098f251e017852d6d86",
"index": 1085,
"step-1": "<mask token>\n\n\nclass BaseExecution:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseExecution:\n\n def __init__(self, flag, parser):\n self.flag = flag\n self.parser = parser\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BaseExecution:\n\n def __init__(self, flag, parser):\n self.flag = flag\n self.parser = parser\n\n def execute(self):\n process = subprocess.Popen(f'df {self.flag}', shell=True, stdout=\n subprocess.PIPE, stderr=subprocess.PIPE)\n output, err = process.communicate()\n return_code = process.returncode\n parser = self.parser(output, err, return_code)\n result = parser.parse()\n return result\n",
"step-4": "import subprocess\n\n\nclass BaseExecution:\n\n def __init__(self, flag, parser):\n self.flag = flag\n self.parser = parser\n\n def execute(self):\n process = subprocess.Popen(f'df {self.flag}', shell=True, stdout=\n subprocess.PIPE, stderr=subprocess.PIPE)\n output, err = process.communicate()\n return_code = process.returncode\n parser = self.parser(output, err, return_code)\n result = parser.parse()\n return result\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class PageDetector:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PageDetector:
def __init__(self, driver):
self.selenium = SeleniumWrapper(driver)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PageDetector:
def __init__(self, driver):
self.selenium = SeleniumWrapper(driver)
def detect(self):
if self.selenium.wait_for_presence(locator=(By.ID, 'teams-app-bar'),
timeout=30):
if self.selenium.wait_for_presence(locator=(By.ID,
'download-desktop-page'), timeout=3):
return 'promo-page'
return 'main-app-page'
elif self.selenium.wait_for_title_contains(title_substring=
'Sign in', timeout=30):
return 'sign-in-page'
else:
return 'unknown'
<|reserved_special_token_1|>
from wrapper import SeleniumWrapper
from selenium.webdriver.common.by import By
class PageDetector:
def __init__(self, driver):
self.selenium = SeleniumWrapper(driver)
def detect(self):
if self.selenium.wait_for_presence(locator=(By.ID, 'teams-app-bar'),
timeout=30):
if self.selenium.wait_for_presence(locator=(By.ID,
'download-desktop-page'), timeout=3):
return 'promo-page'
return 'main-app-page'
elif self.selenium.wait_for_title_contains(title_substring=
'Sign in', timeout=30):
return 'sign-in-page'
else:
return 'unknown'
<|reserved_special_token_1|>
from wrapper import SeleniumWrapper
from selenium.webdriver.common.by import By
class PageDetector:
    """Classify which page the webdriver is currently showing."""

    def __init__(self, driver):
        self.selenium = SeleniumWrapper(driver)

    def detect(self):
        """Return one of 'promo-page', 'main-app-page', 'sign-in-page'
        or 'unknown' for the current page."""
        wrapper = self.selenium
        # The Teams app bar marks both the main app and the promo page;
        # the download banner distinguishes the two.
        if wrapper.wait_for_presence(locator=(By.ID, "teams-app-bar"), timeout=30):
            has_promo = wrapper.wait_for_presence(
                locator=(By.ID, "download-desktop-page"), timeout=3)
            return "promo-page" if has_promo else "main-app-page"
        if wrapper.wait_for_title_contains(title_substring="Sign in", timeout=30):
            return "sign-in-page"
        return "unknown"
|
flexible
|
{
"blob_id": "603d7df0639def2b620cca2299077674e35a74b2",
"index": 5980,
"step-1": "<mask token>\n\n\nclass PageDetector:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PageDetector:\n\n def __init__(self, driver):\n self.selenium = SeleniumWrapper(driver)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass PageDetector:\n\n def __init__(self, driver):\n self.selenium = SeleniumWrapper(driver)\n\n def detect(self):\n if self.selenium.wait_for_presence(locator=(By.ID, 'teams-app-bar'),\n timeout=30):\n if self.selenium.wait_for_presence(locator=(By.ID,\n 'download-desktop-page'), timeout=3):\n return 'promo-page'\n return 'main-app-page'\n elif self.selenium.wait_for_title_contains(title_substring=\n 'Sign in', timeout=30):\n return 'sign-in-page'\n else:\n return 'unknown'\n",
"step-4": "from wrapper import SeleniumWrapper\nfrom selenium.webdriver.common.by import By\n\n\nclass PageDetector:\n\n def __init__(self, driver):\n self.selenium = SeleniumWrapper(driver)\n\n def detect(self):\n if self.selenium.wait_for_presence(locator=(By.ID, 'teams-app-bar'),\n timeout=30):\n if self.selenium.wait_for_presence(locator=(By.ID,\n 'download-desktop-page'), timeout=3):\n return 'promo-page'\n return 'main-app-page'\n elif self.selenium.wait_for_title_contains(title_substring=\n 'Sign in', timeout=30):\n return 'sign-in-page'\n else:\n return 'unknown'\n",
"step-5": "from wrapper import SeleniumWrapper\nfrom selenium.webdriver.common.by import By\n\n\n\nclass PageDetector:\n\n def __init__(self, driver):\n self.selenium = SeleniumWrapper(driver)\n \n\n def detect(self):\n if self.selenium.wait_for_presence(locator=(By.ID, \"teams-app-bar\"), timeout=30):\n if self.selenium.wait_for_presence(locator=(By.ID, \"download-desktop-page\"), timeout=3):\n return \"promo-page\"\n return \"main-app-page\"\n elif self.selenium.wait_for_title_contains(title_substring=\"Sign in\", timeout=30):\n return \"sign-in-page\"\n else:\n return \"unknown\"",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import sqlite3

# Dump every row of the ``rupin`` table.  The connection is closed in a
# ``finally`` block so it is released even if the query raises (the
# original script never closed it).
forth = sqlite3.connect('databaserupin.db')
try:
    sql = "SELECT * from rupin;"
    curforth = forth.cursor()
    curforth.execute(sql)
    result = curforth.fetchall()
    for record in result:
        print(record)
finally:
    forth.close()
|
normal
|
{
"blob_id": "a7f082737bf476a4bc6a40c962764c05bed9ee14",
"index": 9247,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncurforth.execute(sql)\n<mask token>\nfor record in result:\n print(record)\n",
"step-3": "<mask token>\nforth = sqlite3.connect('databaserupin.db')\nsql = 'SELECT * from rupin;'\ncurforth = forth.cursor()\ncurforth.execute(sql)\nresult = curforth.fetchall()\nfor record in result:\n print(record)\n",
"step-4": "import sqlite3\nforth = sqlite3.connect('databaserupin.db')\nsql = 'SELECT * from rupin;'\ncurforth = forth.cursor()\ncurforth.execute(sql)\nresult = curforth.fetchall()\nfor record in result:\n print(record)\n",
"step-5": "import sqlite3\n\nforth = sqlite3.connect('databaserupin.db')\n\nsql = \"SELECT * from rupin;\"\n\ncurforth = forth.cursor()\ncurforth.execute(sql)\n\nresult = curforth.fetchall()\nfor record in result:\n print(record)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
-Medium-
*BFS*
You are given a 0-indexed integer array nums containing distinct numbers, an integer start, and an integer goal. There is an integer x that is initially set to start, and you want to perform operations on x such that it is converted to goal. You can perform the following operation repeatedly on the number x:
If 0 <= x <= 1000, then for any index i in the array (0 <= i < nums.length), you can set x to any of the following:
x + nums[i]
x - nums[i]
x ^ nums[i] (bitwise-XOR)
Note that you can use each nums[i] any number of times in any order. Operations that set x to be out of the range 0 <= x <= 1000 are valid, but no more operations can be done afterward.
Return the minimum number of operations needed to convert x = start into goal, and -1 if it is not possible.
Example 1:
Input: nums = [2,4,12], start = 2, goal = 12
Output: 2
Explanation: We can go from 2 → 14 → 12 with the following 2 operations.
- 2 + 12 = 14
- 14 - 2 = 12
Example 2:
Input: nums = [3,5,7], start = 0, goal = -4
Output: 2
Explanation: We can go from 0 → 3 → -4 with the following 2 operations.
- 0 + 3 = 3
- 3 - 7 = -4
Note that the last operation sets x out of the range 0 <= x <= 1000, which is valid.
Example 3:
Input: nums = [2,8,16], start = 0, goal = 1
Output: -1
Explanation: There is no way to convert 0 into 1.
Constraints:
1 <= nums.length <= 1000
-10^9 <= nums[i], goal <= 10^9
0 <= start <= 1000
start != goal
All the integers in nums are distinct.
'''
from typing import List
from collections import deque
class Solution:
    def minimumOperations(self, nums: List[int], start: int, goal: int) -> int:
        """Return the minimum number of +/-/XOR operations converting
        ``start`` into ``goal``, or -1 if it is unreachable.

        Breadth-first search over reachable values: only values inside
        [0, 1000] may be expanded further, but ``goal`` itself may lie
        outside that range (an out-of-range value is a valid *final*
        step per the problem statement).
        """
        que = deque([(start, 0)])
        # Seed the visited set with ``start`` so the initial value is
        # never re-enqueued (the original left it out, causing redundant
        # queue entries whenever an operation mapped back to start).
        visited = {start}
        while que:
            x, steps = que.popleft()
            for num in nums:
                for t in (x + num, x - num, x ^ num):
                    # Goal check comes first: the last operation is
                    # allowed to leave the [0, 1000] window.
                    if t == goal:
                        return steps + 1
                    if 0 <= t <= 1000 and t not in visited:
                        visited.add(t)
                        que.append((t, steps + 1))
        return -1
if __name__ == "__main__":
    # Smoke tests mirroring the three worked examples in the problem statement.
    print(Solution().minimumOperations(nums = [2,4,12], start = 2, goal = 12))  # expected: 2
    print(Solution().minimumOperations(nums = [3,5,7], start = 0, goal = -4))  # expected: 2
    print(Solution().minimumOperations(nums = [2,8,16], start = 0, goal = 1))  # expected: -1
nums = [-574083075,-393928592,-508025046,942818778,355796909,515245901,40297943,106087952,112856312,-516143616,363801856,431681353,726373078,947630603,357311001,594181298,-797268217,-741740009,310972287,588107527,-535699426,56324906,-77958073,739798122,-839472160,439902753,-599749231,-378067373,-466272504,-668036170,404827976,805486978,-762507067,726001618,-761047930,574054980,365793614,112020312,612806855,-256862366,174046424,646109365,263765015,952305939,864217737,-236873371,-991807014,365730786,-908194963,-778205177,-949314048,-636570500,-883257881,316313456,-846577965,132287864,-143230736,425542510,-99852882,-845180792,-329895545,402782707,-52191127,-470380017,-788836785,-655887976,-899430590,481923982,45348738,-595401481,-470990760,-417390352,-570278840,-873871723,-905595403,276201114,-733014032,126018863,452235438,-512574658,-172220362,845468743,-743189114,597319839,-584451932,410604481,-508885990,-670396751,-765996786,345814977,-920014372,-826696704,640912714,119494504,745808962,-503060001,-677959595,-831428592,282855843,150678167,-467803553,-503929808,636431692,-235369757,-964826080,93942566,-65314422,-385277528,-379647659,601981747,-724269861,-516713072,-487487495,655771565,406499531,-943540581,-290169291,438686645,-227355533,-822612523,218329747,-800810927,-944724740,-978181517,274815523,296317841,56043572,-712672386,-374759873,86973233,-246165119,73819230,-801140338,414767806,883318746,-822063159,-705772942,-674915800,710520717,-97115365,599549847,115344568,53002314,242487774,-665998906,-986068895,-844909606,-515222297,-500827406,317865850,-50395059,522417393,51184184,241544846,-996297136,-227251827,924359619,822815774,149467545,523511343,252991991,450254984,-393459583,617410075,197030479,-234418418,-256650708,872334551,779068346,216294504,-708680875,-171498970,-970211466,-176493993,729939373,-658054782,-342680218,75508900,-377139149,392008859,121412250,-163586626,-468148273,624248706,50004864,-862378428,-849927586,33598413,-157654824,-229712613,149116317
,183820138,378717707,-995563605,777654910,511275580,-157964872,-718605034,-764316227,-225837302,-166208500,-587688677,78982205,-488693575,667205793,419165994,731543316,97551954,-387317666,-580873271,533504431,-31624036,-356035140,-849089082,-767376392,-625237600,940717947,-337709497,915255567,727274007,-879463448,-363148174,-854892492,110472344,-466194659,-146843198,-454944217,-365338018,-349424052,994474446,-554968068,-883734951,-697723265,583756420,-5696410,-413731452,-278706136,-399245668,83345207,-227231270,618384545,846514423,-556667092,590460194,-686116067,-509669269,-510065093,77094171,270317951,166095128,-918526061,-766370855,-20861321,478791777,663673443,-152055285,224745414,123998803,66824877,-85117337,212126175,-718523523,615359230,-212148589,620733736,-81197397,51814471,709312024,562145805,-770811828,321230393,-611636320,-421337549,-804527290,-416739656,-886764000,170695026,414273830,-449987380,-56782953,772039002,-961265403,-896009751,-524231358,497253209,-507048459,-308522246,-508249054,-53240581,-241704483,-974133571,232897679,-152365934,-861310248,-305766289,340680726,844612779,-180227470,40798478,729446447,395975250,-142447074,-606021375,47555730,294446347,452346091,-409427076,-845574381,-838995437,45787728,714700474,-315824001,694717388,502723269,119244099,-538412679,-207297135,-189078560,-812610469,-350061253,-73975237,-119323509,791863263,741180208,740488891,-475394166,-191585617,-441527154,767292531,201222965,-150196525,588513813,245328283,396662663,100705864,126789247,487161165,-460512081,-469521559,-998848254,-917609155,314537168,418002454,-926920818,-628671538,179971032,-105401559,449618919,823404672,178494651,-773108884,10686795,-506642993,-60172121,-510142552,651623281,-163851428,158562600,-782456228,-336697076,-571952851,849878818,-456510759,-65997243,-506043404,-558981572,186946604,124948039,954065944,707437320,-224056616,-319237038,512138196,742466011,-49725596,-784781640,-753413026,-331602365,-246166733,-658650959,-4888181,-547553549,78
6689548,-866846384,-212028209,-98029403,-325422497,-409855095,320083382,-491251215,-471713326,890922019,-766590943,-481641953,-227197451,-709166930,-965945544,407688175,-78385698,-372800469,389036825,79885300,-858488452,-390177477,233839191,-518116358,420408256,872470025,241770824,-106901417,-328631191,548580365,-88408815,-647601013,658880218,-870455388,277154380,370022702,-381519264,-800726224,183685380,208169777,925905330,732494840,251754641,-681988029,593628349,153852085,353590607,242118102,-788094641,-242801844,474214244,579450364,580046580,-269927114,249739292,295331955,-544556236,-814569172,808895922,707421114,305101587,621173158,-248896453,988552702,-375313331,-87289858,-796466539,-529411285,-197315984,33984203,-122839651,-90735568,277265491,762059774,-628018119,-406508643,-856856769,364613737,59319066,614382155,-614620718,-133957131,-394985422,-29943491,154443077,-72727846,392096990,562681453,364248049,-156700958,717335155,-343408748,77301840,-155372684,-432114609,414752267,-485732822,876096548,842614035,-614245110,-872219121,291509502,334817026,214330487,405297459,-449582485,789314834,936409758,452350380,-146649749,898255045,116506422,671728835,280507922,-189039799,-565803074,-439924663,-14345985,-98428526,57303809,424685389,-84977856,-9251973,998935249,229402894,-405424548,448394272,182149207,-728030940,347577568,567511928,-27655302,400866779,-509269521,-580602375,405956020,-855173313,258091129,909162200,-315251598,-236890006,-531780379,342955474,-65890269,-111521851,-139906773,34939329,927781348,300458386,-603518159,341287362,-234266006,634183737,454833275,79631354,-954691672,102295826,688738167,-958428411,-293858940,480440548,590037773,-365477625,-425165732,170388756,164258145,-507355122,44132561,982798160,-101120201,-920959602,-239250887,534862084,-834736952,-123162323,389682556,656996523,864481760,381156936,129520066,-995551618,106129054,-471580461,856850511,653020333,531769579,-190375506,-992983956,73867968,-931909584,403329114,-945055546,627782991,-6
66011011,214665550,505169020,210703185,-591690068,11218620,790987020,561646751,-33552011,-407054835,-850936697,-838201457,-878394038,-759131062,-857347819,531582062,941614352,-743754869,650338718,178603580,-834368178,-976933957,138667533,746471721,551579035,-173400777,-1191455,320121832,-756997945,402594806,934711944,970489131,-193223639,276816990,842959026,-799673669,-367385466,681433973,468892554,-455199860,393993101,905435993,218314965,284795080,913357885,-652530417,743455659,869345718,808902357,829820413,7206928,544900359,225903242,-507688526,750219353,-663810717,-643969173,-269151675,348252329,-144351998,693995296,-692546103,869432378,650161259,568234384,710782517,179157604,-446849233,-922615096,-61183498,30945194,819052356,467911324,119876349,46908453,-420671619,344944591,889080726,-619477633,174882730,553799129,-941691933,146036558,-116064711,222282163,-272996845,-147041859,-381977096,-786757040,229096334,712541239,326039628,-952490563,-362214129,-680530864,421358212,-472290821,-331398150,-42297937,-393141325,-467541333,655524006,452908624,-626562356,-758303565,338224482,312047704,599445442,-328430584,259549134,838272865,-755896597,-151000710,607787908,11870257,-680877184,528161590,769242561,-447486537,-127579653,135915595,-271181270,12536315,693445551,900639800,-692327759,-671179999,977783490,935798407,659688020,-478438023,-852131846,-900332354,-71029072,888095095,924175448,430392829,391195112,399460998,-173259008,-168543477,-495967896,-697314804,591126097,301126906,946273416,-772817341,-996445410,466876435,-92937212,-226599286,43831927,-588596503,-55759661,212885530,-805455693,572269060,415773175,-320900489,-651775079,5276363,91615150,-882588415,502210147,-401039810,26713405,-723806893,125439289,472777644,869504248,967552969,-268043646,-146710780,-511973692,-803204681,-146827180,-453201623,-878534466,631307563,507752930,-63646026,-348120807,222898965,-410732708,617953050,-478244422,877782569,-507956686,-196516478,-477074335,329039585,-480651334,-890030740,4
61391919,-977815738,-943937849,321402466,-588396975,-945139052,871313567,-484830305,365305963,891985414,466048577,880607400,-245705654,359506342,-612177301,840415132,693541406,707348310,971762025,-871678269,897143169,625100531,743908163,-315815019,-63211252,-962051459,510469141,566817231,-186207711,309838979,101194721,-127111899,-109107404,-702499174,918781433,34041307,927374088,-67369303,-680339659,202481166,-218771120,329951816,-280782626,-423403505,619779171,-567310903,-660420942,756801677,996208091,822990010,940351540,1331227,382201579,891956260,-894584436,346600029,805733487,-691767750,859030444,1]
print(Solution().minimumOperations(nums, 938, 80))
|
normal
|
{
"blob_id": "50b2b9d1edc8eaa44050e2b3b2375e966f16e10c",
"index": 6997,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def minimumOperations(self, nums: List[int], start: int, goal: int) ->int:\n que = deque([(start, 0)])\n visited = set()\n while que:\n x, steps = que.popleft()\n for i in nums:\n for t in [x + i, x - i, x ^ i]:\n if t == goal:\n return steps + 1\n if 0 <= t <= 1000 and t not in visited:\n visited.add(t)\n que.append((t, steps + 1))\n return -1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def minimumOperations(self, nums: List[int], start: int, goal: int) ->int:\n que = deque([(start, 0)])\n visited = set()\n while que:\n x, steps = que.popleft()\n for i in nums:\n for t in [x + i, x - i, x ^ i]:\n if t == goal:\n return steps + 1\n if 0 <= t <= 1000 and t not in visited:\n visited.add(t)\n que.append((t, steps + 1))\n return -1\n\n\nif __name__ == '__main__':\n print(Solution().minimumOperations(nums=[2, 4, 12], start=2, goal=12))\n print(Solution().minimumOperations(nums=[3, 5, 7], start=0, goal=-4))\n print(Solution().minimumOperations(nums=[2, 8, 16], start=0, goal=1))\n nums = [-574083075, -393928592, -508025046, 942818778, 355796909, \n 515245901, 40297943, 106087952, 112856312, -516143616, 363801856, \n 431681353, 726373078, 947630603, 357311001, 594181298, -797268217, \n -741740009, 310972287, 588107527, -535699426, 56324906, -77958073, \n 739798122, -839472160, 439902753, -599749231, -378067373, -\n 466272504, -668036170, 404827976, 805486978, -762507067, 726001618,\n -761047930, 574054980, 365793614, 112020312, 612806855, -256862366,\n 174046424, 646109365, 263765015, 952305939, 864217737, -236873371, \n -991807014, 365730786, -908194963, -778205177, -949314048, -\n 636570500, -883257881, 316313456, -846577965, 132287864, -143230736,\n 425542510, -99852882, -845180792, -329895545, 402782707, -52191127,\n -470380017, -788836785, -655887976, -899430590, 481923982, 45348738,\n -595401481, -470990760, -417390352, -570278840, -873871723, -\n 905595403, 276201114, -733014032, 126018863, 452235438, -512574658,\n -172220362, 845468743, -743189114, 597319839, -584451932, 410604481,\n -508885990, -670396751, -765996786, 345814977, -920014372, -\n 826696704, 640912714, 119494504, 745808962, -503060001, -677959595,\n -831428592, 282855843, 150678167, -467803553, -503929808, 636431692,\n -235369757, -964826080, 93942566, -65314422, -385277528, -379647659,\n 601981747, -724269861, -516713072, -487487495, 
655771565, 406499531,\n -943540581, -290169291, 438686645, -227355533, -822612523, \n 218329747, -800810927, -944724740, -978181517, 274815523, 296317841,\n 56043572, -712672386, -374759873, 86973233, -246165119, 73819230, -\n 801140338, 414767806, 883318746, -822063159, -705772942, -674915800,\n 710520717, -97115365, 599549847, 115344568, 53002314, 242487774, -\n 665998906, -986068895, -844909606, -515222297, -500827406, \n 317865850, -50395059, 522417393, 51184184, 241544846, -996297136, -\n 227251827, 924359619, 822815774, 149467545, 523511343, 252991991, \n 450254984, -393459583, 617410075, 197030479, -234418418, -256650708,\n 872334551, 779068346, 216294504, -708680875, -171498970, -970211466,\n -176493993, 729939373, -658054782, -342680218, 75508900, -377139149,\n 392008859, 121412250, -163586626, -468148273, 624248706, 50004864, \n -862378428, -849927586, 33598413, -157654824, -229712613, 149116317,\n 183820138, 378717707, -995563605, 777654910, 511275580, -157964872,\n -718605034, -764316227, -225837302, -166208500, -587688677, \n 78982205, -488693575, 667205793, 419165994, 731543316, 97551954, -\n 387317666, -580873271, 533504431, -31624036, -356035140, -849089082,\n -767376392, -625237600, 940717947, -337709497, 915255567, 727274007,\n -879463448, -363148174, -854892492, 110472344, -466194659, -\n 146843198, -454944217, -365338018, -349424052, 994474446, -\n 554968068, -883734951, -697723265, 583756420, -5696410, -413731452,\n -278706136, -399245668, 83345207, -227231270, 618384545, 846514423,\n -556667092, 590460194, -686116067, -509669269, -510065093, 77094171,\n 270317951, 166095128, -918526061, -766370855, -20861321, 478791777,\n 663673443, -152055285, 224745414, 123998803, 66824877, -85117337, \n 212126175, -718523523, 615359230, -212148589, 620733736, -81197397,\n 51814471, 709312024, 562145805, -770811828, 321230393, -611636320, \n -421337549, -804527290, -416739656, -886764000, 170695026, \n 414273830, -449987380, -56782953, 772039002, -961265403, 
-896009751,\n -524231358, 497253209, -507048459, -308522246, -508249054, -\n 53240581, -241704483, -974133571, 232897679, -152365934, -861310248,\n -305766289, 340680726, 844612779, -180227470, 40798478, 729446447, \n 395975250, -142447074, -606021375, 47555730, 294446347, 452346091, \n -409427076, -845574381, -838995437, 45787728, 714700474, -315824001,\n 694717388, 502723269, 119244099, -538412679, -207297135, -189078560,\n -812610469, -350061253, -73975237, -119323509, 791863263, 741180208,\n 740488891, -475394166, -191585617, -441527154, 767292531, 201222965,\n -150196525, 588513813, 245328283, 396662663, 100705864, 126789247, \n 487161165, -460512081, -469521559, -998848254, -917609155, \n 314537168, 418002454, -926920818, -628671538, 179971032, -105401559,\n 449618919, 823404672, 178494651, -773108884, 10686795, -506642993, \n -60172121, -510142552, 651623281, -163851428, 158562600, -782456228,\n -336697076, -571952851, 849878818, -456510759, -65997243, -\n 506043404, -558981572, 186946604, 124948039, 954065944, 707437320, \n -224056616, -319237038, 512138196, 742466011, -49725596, -784781640,\n -753413026, -331602365, -246166733, -658650959, -4888181, -\n 547553549, 786689548, -866846384, -212028209, -98029403, -325422497,\n -409855095, 320083382, -491251215, -471713326, 890922019, -\n 766590943, -481641953, -227197451, -709166930, -965945544, \n 407688175, -78385698, -372800469, 389036825, 79885300, -858488452, \n -390177477, 233839191, -518116358, 420408256, 872470025, 241770824,\n -106901417, -328631191, 548580365, -88408815, -647601013, 658880218,\n -870455388, 277154380, 370022702, -381519264, -800726224, 183685380,\n 208169777, 925905330, 732494840, 251754641, -681988029, 593628349, \n 153852085, 353590607, 242118102, -788094641, -242801844, 474214244,\n 579450364, 580046580, -269927114, 249739292, 295331955, -544556236,\n -814569172, 808895922, 707421114, 305101587, 621173158, -248896453,\n 988552702, -375313331, -87289858, -796466539, -529411285, -\n 
197315984, 33984203, -122839651, -90735568, 277265491, 762059774, -\n 628018119, -406508643, -856856769, 364613737, 59319066, 614382155, \n -614620718, -133957131, -394985422, -29943491, 154443077, -72727846,\n 392096990, 562681453, 364248049, -156700958, 717335155, -343408748,\n 77301840, -155372684, -432114609, 414752267, -485732822, 876096548,\n 842614035, -614245110, -872219121, 291509502, 334817026, 214330487,\n 405297459, -449582485, 789314834, 936409758, 452350380, -146649749,\n 898255045, 116506422, 671728835, 280507922, -189039799, -565803074,\n -439924663, -14345985, -98428526, 57303809, 424685389, -84977856, -\n 9251973, 998935249, 229402894, -405424548, 448394272, 182149207, -\n 728030940, 347577568, 567511928, -27655302, 400866779, -509269521, \n -580602375, 405956020, -855173313, 258091129, 909162200, -315251598,\n -236890006, -531780379, 342955474, -65890269, -111521851, -\n 139906773, 34939329, 927781348, 300458386, -603518159, 341287362, -\n 234266006, 634183737, 454833275, 79631354, -954691672, 102295826, \n 688738167, -958428411, -293858940, 480440548, 590037773, -365477625,\n -425165732, 170388756, 164258145, -507355122, 44132561, 982798160, \n -101120201, -920959602, -239250887, 534862084, -834736952, -\n 123162323, 389682556, 656996523, 864481760, 381156936, 129520066, -\n 995551618, 106129054, -471580461, 856850511, 653020333, 531769579, \n -190375506, -992983956, 73867968, -931909584, 403329114, -945055546,\n 627782991, -666011011, 214665550, 505169020, 210703185, -591690068,\n 11218620, 790987020, 561646751, -33552011, -407054835, -850936697, \n -838201457, -878394038, -759131062, -857347819, 531582062, \n 941614352, -743754869, 650338718, 178603580, -834368178, -976933957,\n 138667533, 746471721, 551579035, -173400777, -1191455, 320121832, -\n 756997945, 402594806, 934711944, 970489131, -193223639, 276816990, \n 842959026, -799673669, -367385466, 681433973, 468892554, -455199860,\n 393993101, 905435993, 218314965, 284795080, 913357885, 
-652530417, \n 743455659, 869345718, 808902357, 829820413, 7206928, 544900359, \n 225903242, -507688526, 750219353, -663810717, -643969173, -\n 269151675, 348252329, -144351998, 693995296, -692546103, 869432378,\n 650161259, 568234384, 710782517, 179157604, -446849233, -922615096,\n -61183498, 30945194, 819052356, 467911324, 119876349, 46908453, -\n 420671619, 344944591, 889080726, -619477633, 174882730, 553799129, \n -941691933, 146036558, -116064711, 222282163, -272996845, -\n 147041859, -381977096, -786757040, 229096334, 712541239, 326039628,\n -952490563, -362214129, -680530864, 421358212, -472290821, -\n 331398150, -42297937, -393141325, -467541333, 655524006, 452908624,\n -626562356, -758303565, 338224482, 312047704, 599445442, -328430584,\n 259549134, 838272865, -755896597, -151000710, 607787908, 11870257, \n -680877184, 528161590, 769242561, -447486537, -127579653, 135915595,\n -271181270, 12536315, 693445551, 900639800, -692327759, -671179999,\n 977783490, 935798407, 659688020, -478438023, -852131846, -900332354,\n -71029072, 888095095, 924175448, 430392829, 391195112, 399460998, -\n 173259008, -168543477, -495967896, -697314804, 591126097, 301126906,\n 946273416, -772817341, -996445410, 466876435, -92937212, -226599286,\n 43831927, -588596503, -55759661, 212885530, -805455693, 572269060, \n 415773175, -320900489, -651775079, 5276363, 91615150, -882588415, \n 502210147, -401039810, 26713405, -723806893, 125439289, 472777644, \n 869504248, 967552969, -268043646, -146710780, -511973692, -\n 803204681, -146827180, -453201623, -878534466, 631307563, 507752930,\n -63646026, -348120807, 222898965, -410732708, 617953050, -478244422,\n 877782569, -507956686, -196516478, -477074335, 329039585, -\n 480651334, -890030740, 461391919, -977815738, -943937849, 321402466,\n -588396975, -945139052, 871313567, -484830305, 365305963, 891985414,\n 466048577, 880607400, -245705654, 359506342, -612177301, 840415132,\n 693541406, 707348310, 971762025, -871678269, 897143169, 
625100531, \n 743908163, -315815019, -63211252, -962051459, 510469141, 566817231,\n -186207711, 309838979, 101194721, -127111899, -109107404, -\n 702499174, 918781433, 34041307, 927374088, -67369303, -680339659, \n 202481166, -218771120, 329951816, -280782626, -423403505, 619779171,\n -567310903, -660420942, 756801677, 996208091, 822990010, 940351540,\n 1331227, 382201579, 891956260, -894584436, 346600029, 805733487, -\n 691767750, 859030444, 1]\n print(Solution().minimumOperations(nums, 938, 80))\n",
"step-4": "<mask token>\nfrom typing import List\nfrom collections import deque\n\n\nclass Solution:\n\n def minimumOperations(self, nums: List[int], start: int, goal: int) ->int:\n que = deque([(start, 0)])\n visited = set()\n while que:\n x, steps = que.popleft()\n for i in nums:\n for t in [x + i, x - i, x ^ i]:\n if t == goal:\n return steps + 1\n if 0 <= t <= 1000 and t not in visited:\n visited.add(t)\n que.append((t, steps + 1))\n return -1\n\n\nif __name__ == '__main__':\n print(Solution().minimumOperations(nums=[2, 4, 12], start=2, goal=12))\n print(Solution().minimumOperations(nums=[3, 5, 7], start=0, goal=-4))\n print(Solution().minimumOperations(nums=[2, 8, 16], start=0, goal=1))\n nums = [-574083075, -393928592, -508025046, 942818778, 355796909, \n 515245901, 40297943, 106087952, 112856312, -516143616, 363801856, \n 431681353, 726373078, 947630603, 357311001, 594181298, -797268217, \n -741740009, 310972287, 588107527, -535699426, 56324906, -77958073, \n 739798122, -839472160, 439902753, -599749231, -378067373, -\n 466272504, -668036170, 404827976, 805486978, -762507067, 726001618,\n -761047930, 574054980, 365793614, 112020312, 612806855, -256862366,\n 174046424, 646109365, 263765015, 952305939, 864217737, -236873371, \n -991807014, 365730786, -908194963, -778205177, -949314048, -\n 636570500, -883257881, 316313456, -846577965, 132287864, -143230736,\n 425542510, -99852882, -845180792, -329895545, 402782707, -52191127,\n -470380017, -788836785, -655887976, -899430590, 481923982, 45348738,\n -595401481, -470990760, -417390352, -570278840, -873871723, -\n 905595403, 276201114, -733014032, 126018863, 452235438, -512574658,\n -172220362, 845468743, -743189114, 597319839, -584451932, 410604481,\n -508885990, -670396751, -765996786, 345814977, -920014372, -\n 826696704, 640912714, 119494504, 745808962, -503060001, -677959595,\n -831428592, 282855843, 150678167, -467803553, -503929808, 636431692,\n -235369757, -964826080, 93942566, -65314422, -385277528, 
-379647659,\n 601981747, -724269861, -516713072, -487487495, 655771565, 406499531,\n -943540581, -290169291, 438686645, -227355533, -822612523, \n 218329747, -800810927, -944724740, -978181517, 274815523, 296317841,\n 56043572, -712672386, -374759873, 86973233, -246165119, 73819230, -\n 801140338, 414767806, 883318746, -822063159, -705772942, -674915800,\n 710520717, -97115365, 599549847, 115344568, 53002314, 242487774, -\n 665998906, -986068895, -844909606, -515222297, -500827406, \n 317865850, -50395059, 522417393, 51184184, 241544846, -996297136, -\n 227251827, 924359619, 822815774, 149467545, 523511343, 252991991, \n 450254984, -393459583, 617410075, 197030479, -234418418, -256650708,\n 872334551, 779068346, 216294504, -708680875, -171498970, -970211466,\n -176493993, 729939373, -658054782, -342680218, 75508900, -377139149,\n 392008859, 121412250, -163586626, -468148273, 624248706, 50004864, \n -862378428, -849927586, 33598413, -157654824, -229712613, 149116317,\n 183820138, 378717707, -995563605, 777654910, 511275580, -157964872,\n -718605034, -764316227, -225837302, -166208500, -587688677, \n 78982205, -488693575, 667205793, 419165994, 731543316, 97551954, -\n 387317666, -580873271, 533504431, -31624036, -356035140, -849089082,\n -767376392, -625237600, 940717947, -337709497, 915255567, 727274007,\n -879463448, -363148174, -854892492, 110472344, -466194659, -\n 146843198, -454944217, -365338018, -349424052, 994474446, -\n 554968068, -883734951, -697723265, 583756420, -5696410, -413731452,\n -278706136, -399245668, 83345207, -227231270, 618384545, 846514423,\n -556667092, 590460194, -686116067, -509669269, -510065093, 77094171,\n 270317951, 166095128, -918526061, -766370855, -20861321, 478791777,\n 663673443, -152055285, 224745414, 123998803, 66824877, -85117337, \n 212126175, -718523523, 615359230, -212148589, 620733736, -81197397,\n 51814471, 709312024, 562145805, -770811828, 321230393, -611636320, \n -421337549, -804527290, -416739656, -886764000, 
170695026, \n 414273830, -449987380, -56782953, 772039002, -961265403, -896009751,\n -524231358, 497253209, -507048459, -308522246, -508249054, -\n 53240581, -241704483, -974133571, 232897679, -152365934, -861310248,\n -305766289, 340680726, 844612779, -180227470, 40798478, 729446447, \n 395975250, -142447074, -606021375, 47555730, 294446347, 452346091, \n -409427076, -845574381, -838995437, 45787728, 714700474, -315824001,\n 694717388, 502723269, 119244099, -538412679, -207297135, -189078560,\n -812610469, -350061253, -73975237, -119323509, 791863263, 741180208,\n 740488891, -475394166, -191585617, -441527154, 767292531, 201222965,\n -150196525, 588513813, 245328283, 396662663, 100705864, 126789247, \n 487161165, -460512081, -469521559, -998848254, -917609155, \n 314537168, 418002454, -926920818, -628671538, 179971032, -105401559,\n 449618919, 823404672, 178494651, -773108884, 10686795, -506642993, \n -60172121, -510142552, 651623281, -163851428, 158562600, -782456228,\n -336697076, -571952851, 849878818, -456510759, -65997243, -\n 506043404, -558981572, 186946604, 124948039, 954065944, 707437320, \n -224056616, -319237038, 512138196, 742466011, -49725596, -784781640,\n -753413026, -331602365, -246166733, -658650959, -4888181, -\n 547553549, 786689548, -866846384, -212028209, -98029403, -325422497,\n -409855095, 320083382, -491251215, -471713326, 890922019, -\n 766590943, -481641953, -227197451, -709166930, -965945544, \n 407688175, -78385698, -372800469, 389036825, 79885300, -858488452, \n -390177477, 233839191, -518116358, 420408256, 872470025, 241770824,\n -106901417, -328631191, 548580365, -88408815, -647601013, 658880218,\n -870455388, 277154380, 370022702, -381519264, -800726224, 183685380,\n 208169777, 925905330, 732494840, 251754641, -681988029, 593628349, \n 153852085, 353590607, 242118102, -788094641, -242801844, 474214244,\n 579450364, 580046580, -269927114, 249739292, 295331955, -544556236,\n -814569172, 808895922, 707421114, 305101587, 621173158, 
-248896453,\n 988552702, -375313331, -87289858, -796466539, -529411285, -\n 197315984, 33984203, -122839651, -90735568, 277265491, 762059774, -\n 628018119, -406508643, -856856769, 364613737, 59319066, 614382155, \n -614620718, -133957131, -394985422, -29943491, 154443077, -72727846,\n 392096990, 562681453, 364248049, -156700958, 717335155, -343408748,\n 77301840, -155372684, -432114609, 414752267, -485732822, 876096548,\n 842614035, -614245110, -872219121, 291509502, 334817026, 214330487,\n 405297459, -449582485, 789314834, 936409758, 452350380, -146649749,\n 898255045, 116506422, 671728835, 280507922, -189039799, -565803074,\n -439924663, -14345985, -98428526, 57303809, 424685389, -84977856, -\n 9251973, 998935249, 229402894, -405424548, 448394272, 182149207, -\n 728030940, 347577568, 567511928, -27655302, 400866779, -509269521, \n -580602375, 405956020, -855173313, 258091129, 909162200, -315251598,\n -236890006, -531780379, 342955474, -65890269, -111521851, -\n 139906773, 34939329, 927781348, 300458386, -603518159, 341287362, -\n 234266006, 634183737, 454833275, 79631354, -954691672, 102295826, \n 688738167, -958428411, -293858940, 480440548, 590037773, -365477625,\n -425165732, 170388756, 164258145, -507355122, 44132561, 982798160, \n -101120201, -920959602, -239250887, 534862084, -834736952, -\n 123162323, 389682556, 656996523, 864481760, 381156936, 129520066, -\n 995551618, 106129054, -471580461, 856850511, 653020333, 531769579, \n -190375506, -992983956, 73867968, -931909584, 403329114, -945055546,\n 627782991, -666011011, 214665550, 505169020, 210703185, -591690068,\n 11218620, 790987020, 561646751, -33552011, -407054835, -850936697, \n -838201457, -878394038, -759131062, -857347819, 531582062, \n 941614352, -743754869, 650338718, 178603580, -834368178, -976933957,\n 138667533, 746471721, 551579035, -173400777, -1191455, 320121832, -\n 756997945, 402594806, 934711944, 970489131, -193223639, 276816990, \n 842959026, -799673669, -367385466, 681433973, 
468892554, -455199860,\n 393993101, 905435993, 218314965, 284795080, 913357885, -652530417, \n 743455659, 869345718, 808902357, 829820413, 7206928, 544900359, \n 225903242, -507688526, 750219353, -663810717, -643969173, -\n 269151675, 348252329, -144351998, 693995296, -692546103, 869432378,\n 650161259, 568234384, 710782517, 179157604, -446849233, -922615096,\n -61183498, 30945194, 819052356, 467911324, 119876349, 46908453, -\n 420671619, 344944591, 889080726, -619477633, 174882730, 553799129, \n -941691933, 146036558, -116064711, 222282163, -272996845, -\n 147041859, -381977096, -786757040, 229096334, 712541239, 326039628,\n -952490563, -362214129, -680530864, 421358212, -472290821, -\n 331398150, -42297937, -393141325, -467541333, 655524006, 452908624,\n -626562356, -758303565, 338224482, 312047704, 599445442, -328430584,\n 259549134, 838272865, -755896597, -151000710, 607787908, 11870257, \n -680877184, 528161590, 769242561, -447486537, -127579653, 135915595,\n -271181270, 12536315, 693445551, 900639800, -692327759, -671179999,\n 977783490, 935798407, 659688020, -478438023, -852131846, -900332354,\n -71029072, 888095095, 924175448, 430392829, 391195112, 399460998, -\n 173259008, -168543477, -495967896, -697314804, 591126097, 301126906,\n 946273416, -772817341, -996445410, 466876435, -92937212, -226599286,\n 43831927, -588596503, -55759661, 212885530, -805455693, 572269060, \n 415773175, -320900489, -651775079, 5276363, 91615150, -882588415, \n 502210147, -401039810, 26713405, -723806893, 125439289, 472777644, \n 869504248, 967552969, -268043646, -146710780, -511973692, -\n 803204681, -146827180, -453201623, -878534466, 631307563, 507752930,\n -63646026, -348120807, 222898965, -410732708, 617953050, -478244422,\n 877782569, -507956686, -196516478, -477074335, 329039585, -\n 480651334, -890030740, 461391919, -977815738, -943937849, 321402466,\n -588396975, -945139052, 871313567, -484830305, 365305963, 891985414,\n 466048577, 880607400, -245705654, 359506342, 
-612177301, 840415132,\n 693541406, 707348310, 971762025, -871678269, 897143169, 625100531, \n 743908163, -315815019, -63211252, -962051459, 510469141, 566817231,\n -186207711, 309838979, 101194721, -127111899, -109107404, -\n 702499174, 918781433, 34041307, 927374088, -67369303, -680339659, \n 202481166, -218771120, 329951816, -280782626, -423403505, 619779171,\n -567310903, -660420942, 756801677, 996208091, 822990010, 940351540,\n 1331227, 382201579, 891956260, -894584436, 346600029, 805733487, -\n 691767750, 859030444, 1]\n print(Solution().minimumOperations(nums, 938, 80))\n",
"step-5": "'''\n-Medium-\n*BFS*\n\nYou are given a 0-indexed integer array nums containing distinct numbers, an integer start, and an integer goal. There is an integer x that is initially set to start, and you want to perform operations on x such that it is converted to goal. You can perform the following operation repeatedly on the number x:\n\nIf 0 <= x <= 1000, then for any index i in the array (0 <= i < nums.length), you can set x to any of the following:\n\nx + nums[i]\nx - nums[i]\nx ^ nums[i] (bitwise-XOR)\nNote that you can use each nums[i] any number of times in any order. Operations that set x to be out of the range 0 <= x <= 1000 are valid, but no more operations can be done afterward.\n\nReturn the minimum number of operations needed to convert x = start into goal, and -1 if it is not possible.\n\n \n\nExample 1:\n\nInput: nums = [2,4,12], start = 2, goal = 12\nOutput: 2\nExplanation: We can go from 2 → 14 → 12 with the following 2 operations.\n- 2 + 12 = 14\n- 14 - 2 = 12\nExample 2:\n\nInput: nums = [3,5,7], start = 0, goal = -4\nOutput: 2\nExplanation: We can go from 0 → 3 → -4 with the following 2 operations. 
\n- 0 + 3 = 3\n- 3 - 7 = -4\nNote that the last operation sets x out of the range 0 <= x <= 1000, which is valid.\nExample 3:\n\nInput: nums = [2,8,16], start = 0, goal = 1\nOutput: -1\nExplanation: There is no way to convert 0 into 1.\n \n\nConstraints:\n\n1 <= nums.length <= 1000\n-109 <= nums[i], goal <= 109\n0 <= start <= 1000\nstart != goal\nAll the integers in nums are distinct.\n\n\n'''\n\nfrom typing import List\nfrom collections import deque\n\nclass Solution:\n def minimumOperations(self, nums: List[int], start: int, goal: int) -> int:\n \n que = deque([(start,0)]) \n visited = set() \n while que:\n x, steps = que.popleft() \n for i in nums:\n for t in [x+i, x-i, x^i]:\n if t == goal: return steps + 1\n if 0 <= t <= 1000 and t not in visited:\n visited.add(t)\n que.append((t, steps+1))\n return -1\n\n \n\n\n\n \n\n\nif __name__ == \"__main__\":\n print(Solution().minimumOperations(nums = [2,4,12], start = 2, goal = 12))\n print(Solution().minimumOperations(nums = [3,5,7], start = 0, goal = -4))\n print(Solution().minimumOperations(nums = [2,8,16], start = 0, goal = 1))\n nums = 
[-574083075,-393928592,-508025046,942818778,355796909,515245901,40297943,106087952,112856312,-516143616,363801856,431681353,726373078,947630603,357311001,594181298,-797268217,-741740009,310972287,588107527,-535699426,56324906,-77958073,739798122,-839472160,439902753,-599749231,-378067373,-466272504,-668036170,404827976,805486978,-762507067,726001618,-761047930,574054980,365793614,112020312,612806855,-256862366,174046424,646109365,263765015,952305939,864217737,-236873371,-991807014,365730786,-908194963,-778205177,-949314048,-636570500,-883257881,316313456,-846577965,132287864,-143230736,425542510,-99852882,-845180792,-329895545,402782707,-52191127,-470380017,-788836785,-655887976,-899430590,481923982,45348738,-595401481,-470990760,-417390352,-570278840,-873871723,-905595403,276201114,-733014032,126018863,452235438,-512574658,-172220362,845468743,-743189114,597319839,-584451932,410604481,-508885990,-670396751,-765996786,345814977,-920014372,-826696704,640912714,119494504,745808962,-503060001,-677959595,-831428592,282855843,150678167,-467803553,-503929808,636431692,-235369757,-964826080,93942566,-65314422,-385277528,-379647659,601981747,-724269861,-516713072,-487487495,655771565,406499531,-943540581,-290169291,438686645,-227355533,-822612523,218329747,-800810927,-944724740,-978181517,274815523,296317841,56043572,-712672386,-374759873,86973233,-246165119,73819230,-801140338,414767806,883318746,-822063159,-705772942,-674915800,710520717,-97115365,599549847,115344568,53002314,242487774,-665998906,-986068895,-844909606,-515222297,-500827406,317865850,-50395059,522417393,51184184,241544846,-996297136,-227251827,924359619,822815774,149467545,523511343,252991991,450254984,-393459583,617410075,197030479,-234418418,-256650708,872334551,779068346,216294504,-708680875,-171498970,-970211466,-176493993,729939373,-658054782,-342680218,75508900,-377139149,392008859,121412250,-163586626,-468148273,624248706,50004864,-862378428,-849927586,33598413,-157654824,-229712613,149116317,183820
138,378717707,-995563605,777654910,511275580,-157964872,-718605034,-764316227,-225837302,-166208500,-587688677,78982205,-488693575,667205793,419165994,731543316,97551954,-387317666,-580873271,533504431,-31624036,-356035140,-849089082,-767376392,-625237600,940717947,-337709497,915255567,727274007,-879463448,-363148174,-854892492,110472344,-466194659,-146843198,-454944217,-365338018,-349424052,994474446,-554968068,-883734951,-697723265,583756420,-5696410,-413731452,-278706136,-399245668,83345207,-227231270,618384545,846514423,-556667092,590460194,-686116067,-509669269,-510065093,77094171,270317951,166095128,-918526061,-766370855,-20861321,478791777,663673443,-152055285,224745414,123998803,66824877,-85117337,212126175,-718523523,615359230,-212148589,620733736,-81197397,51814471,709312024,562145805,-770811828,321230393,-611636320,-421337549,-804527290,-416739656,-886764000,170695026,414273830,-449987380,-56782953,772039002,-961265403,-896009751,-524231358,497253209,-507048459,-308522246,-508249054,-53240581,-241704483,-974133571,232897679,-152365934,-861310248,-305766289,340680726,844612779,-180227470,40798478,729446447,395975250,-142447074,-606021375,47555730,294446347,452346091,-409427076,-845574381,-838995437,45787728,714700474,-315824001,694717388,502723269,119244099,-538412679,-207297135,-189078560,-812610469,-350061253,-73975237,-119323509,791863263,741180208,740488891,-475394166,-191585617,-441527154,767292531,201222965,-150196525,588513813,245328283,396662663,100705864,126789247,487161165,-460512081,-469521559,-998848254,-917609155,314537168,418002454,-926920818,-628671538,179971032,-105401559,449618919,823404672,178494651,-773108884,10686795,-506642993,-60172121,-510142552,651623281,-163851428,158562600,-782456228,-336697076,-571952851,849878818,-456510759,-65997243,-506043404,-558981572,186946604,124948039,954065944,707437320,-224056616,-319237038,512138196,742466011,-49725596,-784781640,-753413026,-331602365,-246166733,-658650959,-4888181,-547553549,786689548
,-866846384,-212028209,-98029403,-325422497,-409855095,320083382,-491251215,-471713326,890922019,-766590943,-481641953,-227197451,-709166930,-965945544,407688175,-78385698,-372800469,389036825,79885300,-858488452,-390177477,233839191,-518116358,420408256,872470025,241770824,-106901417,-328631191,548580365,-88408815,-647601013,658880218,-870455388,277154380,370022702,-381519264,-800726224,183685380,208169777,925905330,732494840,251754641,-681988029,593628349,153852085,353590607,242118102,-788094641,-242801844,474214244,579450364,580046580,-269927114,249739292,295331955,-544556236,-814569172,808895922,707421114,305101587,621173158,-248896453,988552702,-375313331,-87289858,-796466539,-529411285,-197315984,33984203,-122839651,-90735568,277265491,762059774,-628018119,-406508643,-856856769,364613737,59319066,614382155,-614620718,-133957131,-394985422,-29943491,154443077,-72727846,392096990,562681453,364248049,-156700958,717335155,-343408748,77301840,-155372684,-432114609,414752267,-485732822,876096548,842614035,-614245110,-872219121,291509502,334817026,214330487,405297459,-449582485,789314834,936409758,452350380,-146649749,898255045,116506422,671728835,280507922,-189039799,-565803074,-439924663,-14345985,-98428526,57303809,424685389,-84977856,-9251973,998935249,229402894,-405424548,448394272,182149207,-728030940,347577568,567511928,-27655302,400866779,-509269521,-580602375,405956020,-855173313,258091129,909162200,-315251598,-236890006,-531780379,342955474,-65890269,-111521851,-139906773,34939329,927781348,300458386,-603518159,341287362,-234266006,634183737,454833275,79631354,-954691672,102295826,688738167,-958428411,-293858940,480440548,590037773,-365477625,-425165732,170388756,164258145,-507355122,44132561,982798160,-101120201,-920959602,-239250887,534862084,-834736952,-123162323,389682556,656996523,864481760,381156936,129520066,-995551618,106129054,-471580461,856850511,653020333,531769579,-190375506,-992983956,73867968,-931909584,403329114,-945055546,627782991,-66601101
1,214665550,505169020,210703185,-591690068,11218620,790987020,561646751,-33552011,-407054835,-850936697,-838201457,-878394038,-759131062,-857347819,531582062,941614352,-743754869,650338718,178603580,-834368178,-976933957,138667533,746471721,551579035,-173400777,-1191455,320121832,-756997945,402594806,934711944,970489131,-193223639,276816990,842959026,-799673669,-367385466,681433973,468892554,-455199860,393993101,905435993,218314965,284795080,913357885,-652530417,743455659,869345718,808902357,829820413,7206928,544900359,225903242,-507688526,750219353,-663810717,-643969173,-269151675,348252329,-144351998,693995296,-692546103,869432378,650161259,568234384,710782517,179157604,-446849233,-922615096,-61183498,30945194,819052356,467911324,119876349,46908453,-420671619,344944591,889080726,-619477633,174882730,553799129,-941691933,146036558,-116064711,222282163,-272996845,-147041859,-381977096,-786757040,229096334,712541239,326039628,-952490563,-362214129,-680530864,421358212,-472290821,-331398150,-42297937,-393141325,-467541333,655524006,452908624,-626562356,-758303565,338224482,312047704,599445442,-328430584,259549134,838272865,-755896597,-151000710,607787908,11870257,-680877184,528161590,769242561,-447486537,-127579653,135915595,-271181270,12536315,693445551,900639800,-692327759,-671179999,977783490,935798407,659688020,-478438023,-852131846,-900332354,-71029072,888095095,924175448,430392829,391195112,399460998,-173259008,-168543477,-495967896,-697314804,591126097,301126906,946273416,-772817341,-996445410,466876435,-92937212,-226599286,43831927,-588596503,-55759661,212885530,-805455693,572269060,415773175,-320900489,-651775079,5276363,91615150,-882588415,502210147,-401039810,26713405,-723806893,125439289,472777644,869504248,967552969,-268043646,-146710780,-511973692,-803204681,-146827180,-453201623,-878534466,631307563,507752930,-63646026,-348120807,222898965,-410732708,617953050,-478244422,877782569,-507956686,-196516478,-477074335,329039585,-480651334,-890030740,46139191
9,-977815738,-943937849,321402466,-588396975,-945139052,871313567,-484830305,365305963,891985414,466048577,880607400,-245705654,359506342,-612177301,840415132,693541406,707348310,971762025,-871678269,897143169,625100531,743908163,-315815019,-63211252,-962051459,510469141,566817231,-186207711,309838979,101194721,-127111899,-109107404,-702499174,918781433,34041307,927374088,-67369303,-680339659,202481166,-218771120,329951816,-280782626,-423403505,619779171,-567310903,-660420942,756801677,996208091,822990010,940351540,1331227,382201579,891956260,-894584436,346600029,805733487,-691767750,859030444,1]\n print(Solution().minimumOperations(nums, 938, 80))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#-*- coding: utf-8 -*-
import re
import sys
import os
import pandas as pd
import jieba
import logging
import argparse
from sklearn.externals import joblib
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import f1_score,accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
from sklearn.externals import joblib
import os
import argparse
import keras as ks
from sklearn.model_selection import train_test_split
#from keras.utils.np_utils import to_categorical
#from keras.models import Sequential
#from keras import layers
import pdb
import logging
from pyfasttext import FastText
# Quick I/O smoke test: copy one review out of the validation CSV into test.txt.
outf = "test.txt"
inf = "remove_items.txt"
# NOTE(review): both handles are opened at import time and only closed at the
# bottom of the script; open() raises IOError here if remove_items.txt is missing.
out = open(outf,'w')
inp = open(inf,'r')
#i = inp.readline()
#print(type(i))
#out.write(inp.readline())
# Hard-coded dataset locations — presumably the AI Challenger sentiment-analysis
# corpus; confirm these absolute paths exist on the target machine.
validate_data_path = "/data1/hjw/fine_grit_emotion_analysis/validation/ai_challenger_sentiment_analysis_validationset_20180816/sentiment_analysis_validationset.csv"
train_data_path = "/data1/hjw/fine_grit_emotion_analysis/train/ai_challenger_sentiment_analysis_trainingset_20180816/sentiment_analysis_trainingset.csv"
#load the data
def load_data_from_csv(file_name, header=0, encoding="utf-8"):
    """Read a CSV file into a pandas DataFrame.

    Args:
        file_name: path of the CSV file to read.
        header: row number to use as column names (pandas default semantics).
        encoding: text encoding of the file.

    Returns:
        The parsed ``pandas.DataFrame``.
    """
    return pd.read_csv(file_name, header=header, encoding=encoding)
#train = load_data_from(train_data_path)
# Load the validation split and dump the first review's raw text to test.txt.
data = load_data_from_csv(validate_data_path)
# NOTE(review): .encode('utf-8') yields bytes, but `out` was opened in text
# mode — this line only works on Python 2; on Python 3 it raises TypeError.
out.write(data['content'][0].encode('utf-8'))
inp.close()
out.close()
|
normal
|
{
"blob_id": "c879230efe12bde9042159da221a2b9b4c1d8349",
"index": 198,
"step-1": "<mask token>\n\n\ndef load_data_from_csv(file_name, header=0, encoding='utf-8'):\n data_df = pd.read_csv(file_name, header=header, encoding=encoding)\n return data_df\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_data_from_csv(file_name, header=0, encoding='utf-8'):\n data_df = pd.read_csv(file_name, header=header, encoding=encoding)\n return data_df\n\n\n<mask token>\nout.write(data['content'][0].encode('utf-8'))\ninp.close()\nout.close()\n",
"step-3": "<mask token>\noutf = 'test.txt'\ninf = 'remove_items.txt'\nout = open(outf, 'w')\ninp = open(inf, 'r')\nvalidate_data_path = (\n '/data1/hjw/fine_grit_emotion_analysis/validation/ai_challenger_sentiment_analysis_validationset_20180816/sentiment_analysis_validationset.csv'\n )\ntrain_data_path = (\n '/data1/hjw/fine_grit_emotion_analysis/train/ai_challenger_sentiment_analysis_trainingset_20180816/sentiment_analysis_trainingset.csv'\n )\n\n\ndef load_data_from_csv(file_name, header=0, encoding='utf-8'):\n data_df = pd.read_csv(file_name, header=header, encoding=encoding)\n return data_df\n\n\ndata = load_data_from_csv(validate_data_path)\nout.write(data['content'][0].encode('utf-8'))\ninp.close()\nout.close()\n",
"step-4": "import re\nimport sys\nimport os\nimport pandas as pd\nimport jieba\nimport logging\nimport argparse\nfrom sklearn.externals import joblib\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport numpy as np\nfrom sklearn.externals import joblib\nimport os\nimport argparse\nimport keras as ks\nfrom sklearn.model_selection import train_test_split\nimport pdb\nimport logging\nfrom pyfasttext import FastText\noutf = 'test.txt'\ninf = 'remove_items.txt'\nout = open(outf, 'w')\ninp = open(inf, 'r')\nvalidate_data_path = (\n '/data1/hjw/fine_grit_emotion_analysis/validation/ai_challenger_sentiment_analysis_validationset_20180816/sentiment_analysis_validationset.csv'\n )\ntrain_data_path = (\n '/data1/hjw/fine_grit_emotion_analysis/train/ai_challenger_sentiment_analysis_trainingset_20180816/sentiment_analysis_trainingset.csv'\n )\n\n\ndef load_data_from_csv(file_name, header=0, encoding='utf-8'):\n data_df = pd.read_csv(file_name, header=header, encoding=encoding)\n return data_df\n\n\ndata = load_data_from_csv(validate_data_path)\nout.write(data['content'][0].encode('utf-8'))\ninp.close()\nout.close()\n",
"step-5": "#-*- coding: utf-8 -*-\nimport re\nimport sys\nimport os\nimport pandas as pd\nimport jieba\nimport logging\nimport argparse\nfrom sklearn.externals import joblib\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import f1_score,accuracy_score\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport numpy as np\nfrom sklearn.externals import joblib\nimport os\nimport argparse\nimport keras as ks\nfrom sklearn.model_selection import train_test_split\n#from keras.utils.np_utils import to_categorical\n#from keras.models import Sequential\n#from keras import layers\nimport pdb\nimport logging\nfrom pyfasttext import FastText\n\noutf = \"test.txt\"\ninf = \"remove_items.txt\"\n\nout = open(outf,'w')\ninp = open(inf,'r')\n\n#i = inp.readline()\n#print(type(i))\n#out.write(inp.readline())\n\n\nvalidate_data_path = \"/data1/hjw/fine_grit_emotion_analysis/validation/ai_challenger_sentiment_analysis_validationset_20180816/sentiment_analysis_validationset.csv\"\ntrain_data_path = \"/data1/hjw/fine_grit_emotion_analysis/train/ai_challenger_sentiment_analysis_trainingset_20180816/sentiment_analysis_trainingset.csv\"\n#load the data\ndef load_data_from_csv(file_name, header=0, encoding=\"utf-8\"):\n\n data_df = pd.read_csv(file_name, header=header, encoding=encoding)\n\n return data_df\n\n#train = load_data_from(train_data_path)\ndata = load_data_from_csv(validate_data_path)\n\nout.write(data['content'][0].encode('utf-8'))\n\ninp.close()\nout.close()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from SpritesClass import Sprite
from JogadorClass import Jogador
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
class Tela:
    """Tracks the current game screen and the player's life sprites.

    Holds one sprite per remaining life (v0..v2) and removes them as the
    linked Jogador loses lives.
    """

    def __init__(self, j, t0):
        # Screens that exist in the game; setEstagio validates against this.
        self.telas = ["jogo", "game over"]
        self.estagio = "jogo"
        self.j = j
        # Life sprites, stacked vertically at x=40 (one per remaining life).
        self.v0 = Sprite(40, 40, 30, 30, t0)
        self.v1 = Sprite(40, 80, 30, 30, t0)
        self.v2 = Sprite(40, 120, 30, 30, t0)
        self.sprites = [self.v0, self.v1, self.v2]

    def getEstagio(self):
        """Return the name of the screen currently being shown."""
        return self.estagio

    def setEstagio(self, temp):
        """Switch to screen `temp` if it is a known screen name."""
        if temp in self.telas:
            self.estagio = temp
        else:
            print("Tela não existe, erro de digitação no código")

    def getSprites(self):
        """Return the list of life sprites still visible."""
        return self.sprites

    def atualizarSprites(self):
        """Drop the life sprites that exceed the player's current life count.

        Fix over the previous version: removal is guarded with a membership
        check, so repeated calls at the same life count no longer raise
        ValueError, and a drop of more than one life at once (e.g. 3 -> 1)
        removes every sprite above the new count instead of just one.
        """
        vidas = self.j.getVidas()
        for limite, sprite in ((2, self.v2), (1, self.v1), (0, self.v0)):
            if vidas <= limite and sprite in self.sprites:
                self.sprites.remove(sprite)
|
normal
|
{
"blob_id": "d1f0baa1ff87ece50aaded5e60908269e81b6734",
"index": 1952,
"step-1": "<mask token>\n\n\nclass Tela:\n <mask token>\n <mask token>\n\n def setEstagio(self, temp):\n if temp in self.telas:\n self.estagio = temp\n else:\n print('Tela não existe, erro de digitação no código')\n <mask token>\n\n def atualizarSprites(self):\n if self.j.getVidas() == 2:\n self.sprites.remove(self.v2)\n if self.j.getVidas() == 1:\n self.sprites.remove(self.v1)\n if self.j.getVidas() == 0:\n self.sprites.remove(self.v0)\n",
"step-2": "<mask token>\n\n\nclass Tela:\n\n def __init__(self, j, t0):\n self.telas = ['jogo', 'game over']\n self.estagio = 'jogo'\n self.j = j\n self.v0 = Sprite(40, 40, 30, 30, t0)\n self.v1 = Sprite(40, 80, 30, 30, t0)\n self.v2 = Sprite(40, 120, 30, 30, t0)\n self.sprites = [self.v0, self.v1, self.v2]\n\n def getEstagio(self):\n return self.estagio\n\n def setEstagio(self, temp):\n if temp in self.telas:\n self.estagio = temp\n else:\n print('Tela não existe, erro de digitação no código')\n <mask token>\n\n def atualizarSprites(self):\n if self.j.getVidas() == 2:\n self.sprites.remove(self.v2)\n if self.j.getVidas() == 1:\n self.sprites.remove(self.v1)\n if self.j.getVidas() == 0:\n self.sprites.remove(self.v0)\n",
"step-3": "<mask token>\n\n\nclass Tela:\n\n def __init__(self, j, t0):\n self.telas = ['jogo', 'game over']\n self.estagio = 'jogo'\n self.j = j\n self.v0 = Sprite(40, 40, 30, 30, t0)\n self.v1 = Sprite(40, 80, 30, 30, t0)\n self.v2 = Sprite(40, 120, 30, 30, t0)\n self.sprites = [self.v0, self.v1, self.v2]\n\n def getEstagio(self):\n return self.estagio\n\n def setEstagio(self, temp):\n if temp in self.telas:\n self.estagio = temp\n else:\n print('Tela não existe, erro de digitação no código')\n\n def getSprites(self):\n return self.sprites\n\n def atualizarSprites(self):\n if self.j.getVidas() == 2:\n self.sprites.remove(self.v2)\n if self.j.getVidas() == 1:\n self.sprites.remove(self.v1)\n if self.j.getVidas() == 0:\n self.sprites.remove(self.v0)\n",
"step-4": "from SpritesClass import Sprite\nfrom JogadorClass import Jogador\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\n\n\nclass Tela:\n\n def __init__(self, j, t0):\n self.telas = ['jogo', 'game over']\n self.estagio = 'jogo'\n self.j = j\n self.v0 = Sprite(40, 40, 30, 30, t0)\n self.v1 = Sprite(40, 80, 30, 30, t0)\n self.v2 = Sprite(40, 120, 30, 30, t0)\n self.sprites = [self.v0, self.v1, self.v2]\n\n def getEstagio(self):\n return self.estagio\n\n def setEstagio(self, temp):\n if temp in self.telas:\n self.estagio = temp\n else:\n print('Tela não existe, erro de digitação no código')\n\n def getSprites(self):\n return self.sprites\n\n def atualizarSprites(self):\n if self.j.getVidas() == 2:\n self.sprites.remove(self.v2)\n if self.j.getVidas() == 1:\n self.sprites.remove(self.v1)\n if self.j.getVidas() == 0:\n self.sprites.remove(self.v0)\n",
"step-5": "from SpritesClass import Sprite\nfrom JogadorClass import Jogador\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\n\nclass Tela:\n def __init__(self,j,t0):\n self.telas = [\"jogo\",\"game over\"] #telas existentes\n self.estagio = \"jogo\"\n self.j = j\n\n #sprites\n self.v0 = Sprite(40,40,30,30,t0)\n self.v1 = Sprite(40,80,30,30,t0)\n self.v2 = Sprite(40,120,30,30,t0)\n self.sprites = [self.v0,self.v1,self.v2]\n\n\n def getEstagio(self):\n return self.estagio\n\n def setEstagio(self,temp):\n if temp in self.telas:\n self.estagio=temp\n else:\n print(\"Tela não existe, erro de digitação no código\")\n\n def getSprites(self):\n return self.sprites\n\n def atualizarSprites(self):\n if self.j.getVidas() == 2:\n self.sprites.remove(self.v2)\n if self.j.getVidas() == 1:\n self.sprites.remove(self.v1)\n if self.j.getVidas() == 0:\n self.sprites.remove(self.v0)",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in list(raw.keys()):
if len(i) > 8:
del raw[i]
print(raw)
print(len(list(raw.keys())))
np.save('shorten_raw_with_freq.npy', raw)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
raw = np.load('raw_with_freq.npy').item()
for i in list(raw.keys()):
if len(i) > 8:
del raw[i]
print(raw)
print(len(list(raw.keys())))
np.save('shorten_raw_with_freq.npy', raw)
<|reserved_special_token_1|>
import numpy as np
raw = np.load('raw_with_freq.npy').item()
for i in list(raw.keys()):
if len(i) > 8:
del raw[i]
print(raw)
print(len(list(raw.keys())))
np.save('shorten_raw_with_freq.npy', raw)
<|reserved_special_token_1|>
import numpy as np

# Load the word->frequency dict stored inside the .npy file, keep only the
# entries whose key is at most 8 characters long, then persist the result.
raw = np.load("raw_with_freq.npy").item()
raw = {word: freq for word, freq in raw.items() if len(word) <= 8}
print(raw)
print(len(list(raw.keys())))
np.save("shorten_raw_with_freq.npy", raw)
|
flexible
|
{
"blob_id": "ffb17b370c892696b341f6d37a2cfe106a5670a5",
"index": 4265,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in list(raw.keys()):\n if len(i) > 8:\n del raw[i]\nprint(raw)\nprint(len(list(raw.keys())))\nnp.save('shorten_raw_with_freq.npy', raw)\n",
"step-3": "<mask token>\nraw = np.load('raw_with_freq.npy').item()\nfor i in list(raw.keys()):\n if len(i) > 8:\n del raw[i]\nprint(raw)\nprint(len(list(raw.keys())))\nnp.save('shorten_raw_with_freq.npy', raw)\n",
"step-4": "import numpy as np\nraw = np.load('raw_with_freq.npy').item()\nfor i in list(raw.keys()):\n if len(i) > 8:\n del raw[i]\nprint(raw)\nprint(len(list(raw.keys())))\nnp.save('shorten_raw_with_freq.npy', raw)\n",
"step-5": "import numpy as np\nraw = np.load(\"raw_with_freq.npy\").item()\nfor i in list(raw.keys()):\n\tif len(i) > 8:\n\t\tdel(raw[i])\nprint(raw)\nprint(len(list(raw.keys())))\nnp.save(\"shorten_raw_with_freq.npy\", raw)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pub_socket.bind('tcp://*:%s' % port)
while True:
topic = 'test'
thisX = np.random.rand()
thisY = np.random.rand()
testDict = {'gaze': (thisX, thisY)}
pub_socket.send_string(topic, zmq.SNDMORE)
pub_socket.send(serializer.dumps(testDict, use_bin_type=True))
print(testDict)
time.sleep(0.02)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
port = '42000'
ipc_sub_url = 'tcp://*:*'
ipc_push_url = 'tcp://*:*'
zmq_ctx = zmq.Context()
pub_socket = zmq_ctx.socket(zmq.PUB)
pub_socket.bind('tcp://*:%s' % port)
while True:
topic = 'test'
thisX = np.random.rand()
thisY = np.random.rand()
testDict = {'gaze': (thisX, thisY)}
pub_socket.send_string(topic, zmq.SNDMORE)
pub_socket.send(serializer.dumps(testDict, use_bin_type=True))
print(testDict)
time.sleep(0.02)
<|reserved_special_token_1|>
from __future__ import print_function
import zmq
import time
import random
import numpy as np
import msgpack as serializer
port = '42000'
ipc_sub_url = 'tcp://*:*'
ipc_push_url = 'tcp://*:*'
zmq_ctx = zmq.Context()
pub_socket = zmq_ctx.socket(zmq.PUB)
pub_socket.bind('tcp://*:%s' % port)
while True:
topic = 'test'
thisX = np.random.rand()
thisY = np.random.rand()
testDict = {'gaze': (thisX, thisY)}
pub_socket.send_string(topic, zmq.SNDMORE)
pub_socket.send(serializer.dumps(testDict, use_bin_type=True))
print(testDict)
time.sleep(0.02)
<|reserved_special_token_1|>
# Standalone smoke-test publisher: broadcasts a fake gaze sample over a ZMQ
# PUB socket forever, so a downstream subscriber can be exercised without
# real eye-tracking hardware. Runs until killed.
from __future__ import print_function
import zmq
import time
import random
import numpy as np 
import msgpack as serializer

port = '42000'

# let the OS choose the IP and PORT
# NOTE(review): ipc_sub_url / ipc_push_url (and the `random` import) are never
# used below — presumably leftovers from the script this was adapted from.
ipc_sub_url = 'tcp://*:*'
ipc_push_url = 'tcp://*:*'

# starting communication threads
zmq_ctx = zmq.Context()
pub_socket = zmq_ctx.socket(zmq.PUB)
# Bind on all interfaces at the fixed port above; subscribers connect here.
pub_socket.bind("tcp://*:%s" % port)


# send messages
# Each message is two frames: the topic string first (for ZMQ SUB-side
# prefix filtering), then the msgpack-encoded payload.
while True:
	topic = 'test'
	thisX = np.random.rand()
	thisY = np.random.rand()
	testDict = {'gaze':(thisX, thisY)}
	pub_socket.send_string(topic, zmq.SNDMORE)
	pub_socket.send(serializer.dumps(testDict, use_bin_type=True))
	print(testDict)
	# ~50 Hz publish rate.
	time.sleep(.02)
|
flexible
|
{
"blob_id": "cb469b69bf974d39609f79c4f3be686d8106f971",
"index": 1431,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npub_socket.bind('tcp://*:%s' % port)\nwhile True:\n topic = 'test'\n thisX = np.random.rand()\n thisY = np.random.rand()\n testDict = {'gaze': (thisX, thisY)}\n pub_socket.send_string(topic, zmq.SNDMORE)\n pub_socket.send(serializer.dumps(testDict, use_bin_type=True))\n print(testDict)\n time.sleep(0.02)\n",
"step-3": "<mask token>\nport = '42000'\nipc_sub_url = 'tcp://*:*'\nipc_push_url = 'tcp://*:*'\nzmq_ctx = zmq.Context()\npub_socket = zmq_ctx.socket(zmq.PUB)\npub_socket.bind('tcp://*:%s' % port)\nwhile True:\n topic = 'test'\n thisX = np.random.rand()\n thisY = np.random.rand()\n testDict = {'gaze': (thisX, thisY)}\n pub_socket.send_string(topic, zmq.SNDMORE)\n pub_socket.send(serializer.dumps(testDict, use_bin_type=True))\n print(testDict)\n time.sleep(0.02)\n",
"step-4": "from __future__ import print_function\nimport zmq\nimport time\nimport random\nimport numpy as np\nimport msgpack as serializer\nport = '42000'\nipc_sub_url = 'tcp://*:*'\nipc_push_url = 'tcp://*:*'\nzmq_ctx = zmq.Context()\npub_socket = zmq_ctx.socket(zmq.PUB)\npub_socket.bind('tcp://*:%s' % port)\nwhile True:\n topic = 'test'\n thisX = np.random.rand()\n thisY = np.random.rand()\n testDict = {'gaze': (thisX, thisY)}\n pub_socket.send_string(topic, zmq.SNDMORE)\n pub_socket.send(serializer.dumps(testDict, use_bin_type=True))\n print(testDict)\n time.sleep(0.02)\n",
"step-5": "from __future__ import print_function\nimport zmq\nimport time\nimport random\nimport numpy as np \nimport msgpack as serializer\n\nport = '42000'\n\n# let the OS choose the IP and PORT\nipc_sub_url = 'tcp://*:*'\nipc_push_url = 'tcp://*:*'\n\n# starting communication threads\nzmq_ctx = zmq.Context()\npub_socket = zmq_ctx.socket(zmq.PUB)\npub_socket.bind(\"tcp://*:%s\" % port)\n\n\n# send messages\nwhile True:\n\ttopic = 'test'\n\tthisX = np.random.rand()\n\tthisY = np.random.rand()\n\ttestDict = {'gaze':(thisX, thisY)}\n\tpub_socket.send_string(topic, zmq.SNDMORE)\n\tpub_socket.send(serializer.dumps(testDict, use_bin_type=True))\n\tprint(testDict)\n\ttime.sleep(.02)\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from datetime import datetime
class Guest:
    """A garage guest who can park and retrieve a car.

    Delegates all mechanics to two collaborators: `controlboard` (ID
    generation and the persistent guest file) and `CarRotationManager`
    (platform lookup, rotation and car release).
    """

    def __init__(self, Name, FamilyName, Car, controlboard,
                 CarRotationManager, ID=0, linkedplatform=None, Start=0):
        self.Name = Name
        self.FamilyName = FamilyName
        self.Car = Car
        self.controlboard = controlboard
        self.CarRotationManager = CarRotationManager
        # ID == 0 marks a brand-new guest: ask the control board for a fresh
        # unique ID. Otherwise the guest parked before and keeps his old ID.
        if ID == 0:
            self.uniqueID = controlboard.set_id()
        else:
            self.uniqueID = ID
        self.parked = False          # whether the guest's car is currently parked
        self.linkedplatform = None   # platform holding the car (None when not parked)
        self.Start = Start           # "date@time" string recorded at park time

    def parked_and_linkedplatform_value(self):
        """Refresh self.parked / self.linkedplatform from the rotation manager."""
        boolean, linkedplatform = self.CarRotationManager.check_if_guest_parked(self)
        # Idiom fix over the previous version: test truthiness directly
        # instead of comparing against True/False.
        if boolean:
            self.parked = True
            self.linkedplatform = linkedplatform
        else:
            self.parked = False
            self.linkedplatform = None

    def request_car(self):
        """Release the guest's car if it is parked; otherwise just report."""
        self.parked_and_linkedplatform_value()
        if not self.parked:
            print("Your car is not parked!\n")
            return
        # Second check through the manager's position lookup; -1 means the
        # manager does not know the car either.
        pos = self.CarRotationManager.get_platform_position(self)
        if pos == -1:
            print("Your car is not parked!\n")
            return
        self.CarRotationManager.return_platform_to_base(pos)   # rotate car to ground level
        self.CarRotationManager.release_car(self.linkedplatform)
        self.parked = False
        self.CarRotationManager.occupiedPlatforms -= 1
        print("Your " + self.Car.model + " has been released.")
        print("Have a great day " + self.Name + "!\n")
        # The guest file only tracks currently-parked guests.
        self.controlboard.remove_guest_from_file(self)

    def park_car(self):
        """Park the car on the closest empty platform.

        Returns -1 when the parking is full; otherwise returns None after
        linking the car, recording the park timestamp in the guest file and
        caching it in self.Start.
        """
        self.parked_and_linkedplatform_value()
        if self.parked:
            print("Your car is already parked!\n")
            return
        platform = self.CarRotationManager.return_empty_platform()
        if platform is None:
            return -1  # parking is full
        self.CarRotationManager.return_platform_to_base(platform.Position)
        platform.link(self)  # car now sits on the base platform
        self.linkedplatform = platform
        self.parked = True
        self.CarRotationManager.occupiedPlatforms += 1
        print("Your " + self.Car.model + " has been parked!\n")
        # Persist the park time as "YYYY-MM-DD@HH:MM:SS.ffffff".
        now = datetime.now()
        array = str(now).split()
        string_into_file = array[0] + "@" + array[1]
        self.controlboard.add_guest_to_file(self, string_into_file)
        self.Start = string_into_file
|
normal
|
{
"blob_id": "3553fa72cb831f82a1030b9eadc9594eee1d1422",
"index": 2152,
"step-1": "<mask token>\n\n\nclass Guest:\n <mask token>\n\n def parked_and_linkedplatform_value(self):\n boolean, linkedplatform = (self.CarRotationManager.\n check_if_guest_parked(self))\n if boolean == True:\n self.parked = True\n self.linkedplatform = linkedplatform\n else:\n self.parked = False\n self.linkedplatform = None\n <mask token>\n\n def park_car(self):\n self.parked_and_linkedplatform_value()\n if self.parked == True:\n print('Your car is already parked!\\n')\n return\n platform = self.CarRotationManager.return_empty_platform()\n if platform == None:\n return -1\n self.CarRotationManager.return_platform_to_base(platform.Position)\n platform.link(self)\n self.linkedplatform = platform\n self.parked = True\n self.CarRotationManager.occupiedPlatforms = (self.\n CarRotationManager.occupiedPlatforms + 1)\n print('Your ' + self.Car.model + ' has been parked!\\n')\n now = datetime.now()\n array = str(now).split()\n string_into_file = array[0] + '@' + array[1]\n self.controlboard.add_guest_to_file(self, string_into_file)\n self.Start = string_into_file\n",
"step-2": "<mask token>\n\n\nclass Guest:\n <mask token>\n\n def parked_and_linkedplatform_value(self):\n boolean, linkedplatform = (self.CarRotationManager.\n check_if_guest_parked(self))\n if boolean == True:\n self.parked = True\n self.linkedplatform = linkedplatform\n else:\n self.parked = False\n self.linkedplatform = None\n\n def request_car(self):\n self.parked_and_linkedplatform_value()\n if self.parked == False:\n print('Your car is not parked!\\n')\n return\n pos = self.CarRotationManager.get_platform_position(self)\n if pos == -1:\n print('Your car is not parked!\\n')\n return\n self.CarRotationManager.return_platform_to_base(pos)\n self.CarRotationManager.release_car(self.linkedplatform)\n self.parked = False\n self.CarRotationManager.occupiedPlatforms = (self.\n CarRotationManager.occupiedPlatforms - 1)\n print('Your ' + self.Car.model + ' has been released.')\n print('Have a great day ' + self.Name + '!\\n')\n self.controlboard.remove_guest_from_file(self)\n\n def park_car(self):\n self.parked_and_linkedplatform_value()\n if self.parked == True:\n print('Your car is already parked!\\n')\n return\n platform = self.CarRotationManager.return_empty_platform()\n if platform == None:\n return -1\n self.CarRotationManager.return_platform_to_base(platform.Position)\n platform.link(self)\n self.linkedplatform = platform\n self.parked = True\n self.CarRotationManager.occupiedPlatforms = (self.\n CarRotationManager.occupiedPlatforms + 1)\n print('Your ' + self.Car.model + ' has been parked!\\n')\n now = datetime.now()\n array = str(now).split()\n string_into_file = array[0] + '@' + array[1]\n self.controlboard.add_guest_to_file(self, string_into_file)\n self.Start = string_into_file\n",
"step-3": "<mask token>\n\n\nclass Guest:\n\n def __init__(self, Name, FamilyName, Car, controlboard,\n CarRotationManager, ID=0, linkedplatform=None, Start=0):\n self.Name = Name\n self.FamilyName = FamilyName\n self.Car = Car\n self.controlboard = controlboard\n self.CarRotationManager = CarRotationManager\n if ID == 0:\n self.uniqueID = controlboard.set_id()\n else:\n self.uniqueID = ID\n self.parked = False\n self.linkedplatform = None\n self.Start = Start\n\n def parked_and_linkedplatform_value(self):\n boolean, linkedplatform = (self.CarRotationManager.\n check_if_guest_parked(self))\n if boolean == True:\n self.parked = True\n self.linkedplatform = linkedplatform\n else:\n self.parked = False\n self.linkedplatform = None\n\n def request_car(self):\n self.parked_and_linkedplatform_value()\n if self.parked == False:\n print('Your car is not parked!\\n')\n return\n pos = self.CarRotationManager.get_platform_position(self)\n if pos == -1:\n print('Your car is not parked!\\n')\n return\n self.CarRotationManager.return_platform_to_base(pos)\n self.CarRotationManager.release_car(self.linkedplatform)\n self.parked = False\n self.CarRotationManager.occupiedPlatforms = (self.\n CarRotationManager.occupiedPlatforms - 1)\n print('Your ' + self.Car.model + ' has been released.')\n print('Have a great day ' + self.Name + '!\\n')\n self.controlboard.remove_guest_from_file(self)\n\n def park_car(self):\n self.parked_and_linkedplatform_value()\n if self.parked == True:\n print('Your car is already parked!\\n')\n return\n platform = self.CarRotationManager.return_empty_platform()\n if platform == None:\n return -1\n self.CarRotationManager.return_platform_to_base(platform.Position)\n platform.link(self)\n self.linkedplatform = platform\n self.parked = True\n self.CarRotationManager.occupiedPlatforms = (self.\n CarRotationManager.occupiedPlatforms + 1)\n print('Your ' + self.Car.model + ' has been parked!\\n')\n now = datetime.now()\n array = str(now).split()\n 
string_into_file = array[0] + '@' + array[1]\n self.controlboard.add_guest_to_file(self, string_into_file)\n self.Start = string_into_file\n",
"step-4": "from datetime import datetime\n\n\nclass Guest:\n\n def __init__(self, Name, FamilyName, Car, controlboard,\n CarRotationManager, ID=0, linkedplatform=None, Start=0):\n self.Name = Name\n self.FamilyName = FamilyName\n self.Car = Car\n self.controlboard = controlboard\n self.CarRotationManager = CarRotationManager\n if ID == 0:\n self.uniqueID = controlboard.set_id()\n else:\n self.uniqueID = ID\n self.parked = False\n self.linkedplatform = None\n self.Start = Start\n\n def parked_and_linkedplatform_value(self):\n boolean, linkedplatform = (self.CarRotationManager.\n check_if_guest_parked(self))\n if boolean == True:\n self.parked = True\n self.linkedplatform = linkedplatform\n else:\n self.parked = False\n self.linkedplatform = None\n\n def request_car(self):\n self.parked_and_linkedplatform_value()\n if self.parked == False:\n print('Your car is not parked!\\n')\n return\n pos = self.CarRotationManager.get_platform_position(self)\n if pos == -1:\n print('Your car is not parked!\\n')\n return\n self.CarRotationManager.return_platform_to_base(pos)\n self.CarRotationManager.release_car(self.linkedplatform)\n self.parked = False\n self.CarRotationManager.occupiedPlatforms = (self.\n CarRotationManager.occupiedPlatforms - 1)\n print('Your ' + self.Car.model + ' has been released.')\n print('Have a great day ' + self.Name + '!\\n')\n self.controlboard.remove_guest_from_file(self)\n\n def park_car(self):\n self.parked_and_linkedplatform_value()\n if self.parked == True:\n print('Your car is already parked!\\n')\n return\n platform = self.CarRotationManager.return_empty_platform()\n if platform == None:\n return -1\n self.CarRotationManager.return_platform_to_base(platform.Position)\n platform.link(self)\n self.linkedplatform = platform\n self.parked = True\n self.CarRotationManager.occupiedPlatforms = (self.\n CarRotationManager.occupiedPlatforms + 1)\n print('Your ' + self.Car.model + ' has been parked!\\n')\n now = datetime.now()\n array = 
str(now).split()\n string_into_file = array[0] + '@' + array[1]\n self.controlboard.add_guest_to_file(self, string_into_file)\n self.Start = string_into_file\n",
"step-5": "from datetime import datetime\r\n\r\nclass Guest:\r\n def __init__(self, Name, FamilyName, Car, controlboard,\r\n CarRotationManager, ID=0, linkedplatform=None,Start=0): # --Initializing Guest credentials/info---\r\n self.Name = Name\r\n self.FamilyName = FamilyName\r\n self.Car = Car\r\n self.controlboard = controlboard\r\n self.CarRotationManager = CarRotationManager\r\n if ID == 0: # In this case, the guest would be a new guest, so when we register him as a guest we don't give him an ID, and we ask the controlboard to generate the ID\r\n self.uniqueID = controlboard.set_id() # ----calling controlboard class to set ID---unique ID given by control board/decision engine\r\n else: # In this case, the guest would have already parked before and he would already have an ID, so instead of generating a new ID we just give him his old one\r\n self.uniqueID = ID\r\n self.parked = False # Boolean variable which indicates if guest is parked or not\r\n self.linkedplatform = None # Variable containing the platform where the guest's car is parked\r\n self.Start=Start # This is the time when the guest parks\r\n\r\n def parked_and_linkedplatform_value(self): # This function checks if the guest is parked and sets the values of linkedplatform and parked accordingly\r\n (boolean, linkedplatform) = self.CarRotationManager.check_if_guest_parked(self)\r\n if boolean == True:\r\n self.parked = True\r\n self.linkedplatform = linkedplatform\r\n else:\r\n self.parked = False\r\n self.linkedplatform = None\r\n\r\n def request_car(self): # Function that releases the car if it is parked\r\n self.parked_and_linkedplatform_value()\r\n if self.parked == False:\r\n print(\"Your car is not parked!\\n\")\r\n return\r\n pos = self.CarRotationManager.get_platform_position(self) # Get the car's current position in the parking\r\n if (pos == -1):\r\n print(\"Your car is not parked!\\n\")\r\n return\r\n self.CarRotationManager.return_platform_to_base(pos) # Move the car to the base 
position\r\n self.CarRotationManager.release_car(self.linkedplatform) # Release the car\r\n self.parked = False\r\n self.CarRotationManager.occupiedPlatforms = self.CarRotationManager.occupiedPlatforms - 1\r\n print(\"Your \" + self.Car.model + \" has been released.\")\r\n print(\"Have a great day \" + self.Name + \"!\\n\")\r\n self.controlboard.remove_guest_from_file(self) # We remove the guest from the file once his car is not parked anymore\r\n\r\n def park_car(self): # Function that parks the guest's car if it's not already parked\r\n self.parked_and_linkedplatform_value()\r\n if (self.parked == True):\r\n print(\"Your car is already parked!\\n\")\r\n return\r\n platform = self.CarRotationManager.return_empty_platform() # FOUND CLOSEST EMPTY PLATFORM\r\n if (platform == None):\r\n return -1 # PARKING IS FULL\r\n self.CarRotationManager.return_platform_to_base(platform.Position)\r\n platform.link(self) # NOW USER'S CAR IS PARKED ON BASE PLATFORM\r\n self.linkedplatform = platform\r\n self.parked = True\r\n self.CarRotationManager.occupiedPlatforms = self.CarRotationManager.occupiedPlatforms + 1\r\n print(\"Your \" + self.Car.model + \" has been parked!\\n\")\r\n now = datetime.now() # Get the current time, i.e when the user parks his car\r\n array = str(now).split()\r\n string_into_file = array[0] + \"@\" + array[1]\r\n self.controlboard.add_guest_to_file(self,string_into_file) # Add the current time (when the user parked) next to his information in the guest file\r\n self.Start=string_into_file\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/env python
"""
Calculate trigger efficiency error
"""

__author__ = "XIAO Suyu<xiaosuyu@ihep.ac.cn>"
__copyright__ = "Copyright (c) XIAO Suyu"
__created__ = "[2018-02-06 Tue 15:25]"

import math

# n1 events fire the trigger out of n2 candidates; N is the sample size used
# for the binomial error estimate.
n1 = 4212.0
n2 = 4237.0
N = 5000.0

eff = n1 / n2
# Binomial (Wald) uncertainty on the efficiency: sqrt(eff * (1 - eff) / N).
err = math.sqrt(eff * (1 - eff) / N)

# BUGFIX(portability): parenthesized form runs under both Python 2 and 3;
# the original bare `print ...` statement is a SyntaxError on Python 3.
print('trig_eff = %.4f +- %f' % (eff, err))
|
normal
|
{
"blob_id": "bac3f78b8eb9c4595bc9e8b85587819f92329729",
"index": 2295,
"step-1": "#!/usr/bin/env python\n\"\"\"\nCalculate trigger efficiency error\n\"\"\"\n\n__author__ = \"XIAO Suyu<xiaosuyu@ihep.ac.cn>\"\n__copyright__ = \"Copyright (c) XIAO Suyu\"\n__created__ = \"[2018-02-06 Tue 15:25]\"\n\nimport math\n\nn1 = 4212.0\nn2 = 4237.0\nN = 5000.0\n\neff = n1 / n2\nerr = math.sqrt(eff*(1-eff)/N)\n\nprint 'trig_eff = %.4f +- %f' % (eff, err)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def user(request):
context = {'users': User.objects.all(), 'user_level': User.objects.get(
id=request.session['user_id'])}
return render(request, 'dashboard/user.html', context)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def user(request):
context = {'users': User.objects.all(), 'user_level': User.objects.get(
id=request.session['user_id'])}
return render(request, 'dashboard/user.html', context)
def admin(request):
context = {'users': User.objects.all(), 'user_level': User.objects.get(
id=request.session['user_id'])}
return render(request, 'dashboard/admin.html', context)
<|reserved_special_token_1|>
from django.shortcuts import render
from ..login.models import *
def user(request):
context = {'users': User.objects.all(), 'user_level': User.objects.get(
id=request.session['user_id'])}
return render(request, 'dashboard/user.html', context)
def admin(request):
context = {'users': User.objects.all(), 'user_level': User.objects.get(
id=request.session['user_id'])}
return render(request, 'dashboard/admin.html', context)
<|reserved_special_token_1|>
from django.shortcuts import render
from .. login.models import *
def user(request):
    """Render the user dashboard with every user and the logged-in user's record."""
    logged_in = User.objects.get(id=request.session['user_id'])
    ctx = {
        "users": User.objects.all(),
        "user_level": logged_in,
    }
    return render(request, 'dashboard/user.html', ctx)
def admin(request):
    """Render the admin dashboard with every user and the logged-in user's record."""
    logged_in = User.objects.get(id=request.session['user_id'])
    ctx = {
        "users": User.objects.all(),
        "user_level": logged_in,
    }
    return render(request, 'dashboard/admin.html', ctx)
|
flexible
|
{
"blob_id": "3d737d0ee9c3af1f8ebe4c6998ad30fa34f42856",
"index": 570,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef user(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/user.html', context)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef user(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/user.html', context)\n\n\ndef admin(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/admin.html', context)\n",
"step-4": "from django.shortcuts import render\nfrom ..login.models import *\n\n\ndef user(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/user.html', context)\n\n\ndef admin(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/admin.html', context)\n",
"step-5": "from django.shortcuts import render\nfrom .. login.models import *\n\ndef user(request):\n context = {\n \"users\" : User.objects.all(),\n \"user_level\" : User.objects.get(id = request.session['user_id'])\n }\n return render(request, 'dashboard/user.html', context)\n\ndef admin(request):\n context = {\n \"users\" : User.objects.all(),\n \"user_level\" : User.objects.get(id = request.session['user_id'])\n }\n return render(request, 'dashboard/admin.html', context)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Inspect verified smart-contract sources with the `yet` AST helper.
# NOTE(review): Python 2 script (bare print statements, indexable dict.keys()).
import yet
import pickle

# Pickled mapping; presumably contract address -> Solidity source text — verify
# against whatever wrote ./db/source_list.
sources = pickle.load(open("./db/source_list"))
addr_list = sources.keys()
'''
for i in range(len(addr_list)):
    print addr_list[i],
    try:
        a = yet.tree(None, sources[addr_list[i]])
        print ' Owner :',
        for i in a.owner.keys():
            print i+ '() ' + a.owner[i][1]['name'] + ',',
    except Exception as e:
        pass
        #print 'error!'
    print ''
'''

# Compile a local test contract and take the AST of the (single) compiled unit.
compiled = yet.solc.compile_source(open("./test.sol").read(100000))
ast = compiled[compiled.keys()[0]]['ast']
b = yet.tree(ast)
# Dump the names of all modifiers found in the AST.
print 'modifier list'
for i in b.modifier_list:
    print i['attributes']['name']
# Dump all function names, then only the public ones.
print 'function list'
for i in b.function_list:
    print i['attributes']['name']
print ''
for i in b.public_function_list:
    print i['attributes']['name']
print b.owner
'''
import pickle
import solc
import re
import utils.getsource as gs
import utils.verified_parse as vp
sources = pickle.load(open('./db/real_source_list', 'r'))
addr_list = sources.keys()
new_sources = {}
compiled_list = []
err_count = 0
for i in range(len(addr_list)):
    print str(i)
    #print gs.comment_remover(sources[addr_list[i]])
    #print gs.clear(sources[addr_list[i]])
    try:
        new_sources[addr_list[i]] = re.sub('pragma.+[\n]', '', gs.clear(sources[addr_list[i]]))
    except:
        print 'fuck!!'
        err_count += 1
    #compiled_list.append(solc.compile_source(tmp))
pickle.dump(new_sources, open("./db/real_source_list.tmp", "wb"))
print 'total error count : ' + str(err_count)
for i in addr_list:
    tmp_source = gs.comment_remover(sources[i])
    print gs.getcontractname(tmp_source)
'''
|
normal
|
{
"blob_id": "1c55cfa03cd9210b7cf9e728732afe19930e9a41",
"index": 9786,
"step-1": "import yet\nimport pickle\n\nsources = pickle.load(open(\"./db/source_list\"))\naddr_list = sources.keys()\n\n'''\nfor i in range(len(addr_list)):\n print addr_list[i], \n try:\n a = yet.tree(None, sources[addr_list[i]])\n\n print ' Owner :',\n\n for i in a.owner.keys():\n print i+ '() ' + a.owner[i][1]['name'] + ',',\n except Exception as e:\n pass\n #print 'error!'\n print ''\n'''\n\ncompiled = yet.solc.compile_source(open(\"./test.sol\").read(100000))\nast = compiled[compiled.keys()[0]]['ast']\n\nb = yet.tree(ast)\n\nprint 'modifier list'\nfor i in b.modifier_list:\n print i['attributes']['name']\n\nprint 'function list'\nfor i in b.function_list:\n print i['attributes']['name']\nprint ''\n\nfor i in b.public_function_list:\n print i['attributes']['name']\n\nprint b.owner\n\n\n'''\nimport pickle\nimport solc\nimport re\n\nimport utils.getsource as gs\nimport utils.verified_parse as vp\n\nsources = pickle.load(open('./db/real_source_list', 'r'))\naddr_list = sources.keys()\n\nnew_sources = {}\n\ncompiled_list = []\nerr_count = 0\nfor i in range(len(addr_list)):\n print str(i)\n\n #print gs.comment_remover(sources[addr_list[i]])\n #print gs.clear(sources[addr_list[i]])\n\n try:\n new_sources[addr_list[i]] = re.sub('pragma.+[\\n]', '', gs.clear(sources[addr_list[i]]))\n except:\n print 'fuck!!'\n err_count += 1\n\n #compiled_list.append(solc.compile_source(tmp))\n\npickle.dump(new_sources, open(\"./db/real_source_list.tmp\", \"wb\"))\n\nprint 'total error count : ' + str(err_count)\nfor i in addr_list:\n tmp_source = gs.comment_remover(sources[i])\n\n print gs.getcontractname(tmp_source)\n'''\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def dist(counts):
n = abs(counts['n'] - counts['s'])
nw = abs(counts['nw'] - counts['se'])
ne = abs(counts['ne'] - counts['sw'])
return n + max(ne, nw)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def dist(counts):
n = abs(counts['n'] - counts['s'])
nw = abs(counts['nw'] - counts['se'])
ne = abs(counts['ne'] - counts['sw'])
return n + max(ne, nw)
if __name__ == '__main__':
counts = defaultdict(int)
with open('day11.input.txt') as f:
INPUT = f.read().strip()
dir_list = INPUT.split(',')
for dir in dir_list:
counts[dir] += 1
print(dist(counts))
counts = defaultdict(int)
with open('day11.input.txt') as f:
INPUT = f.read().strip()
dir_list = INPUT.split(',')
max_d = -1
for dir in dir_list:
counts[dir] += 1
max_d = max(max_d, dist(counts))
print('max=', max_d)
<|reserved_special_token_1|>
from collections import defaultdict
def dist(counts):
n = abs(counts['n'] - counts['s'])
nw = abs(counts['nw'] - counts['se'])
ne = abs(counts['ne'] - counts['sw'])
return n + max(ne, nw)
if __name__ == '__main__':
counts = defaultdict(int)
with open('day11.input.txt') as f:
INPUT = f.read().strip()
dir_list = INPUT.split(',')
for dir in dir_list:
counts[dir] += 1
print(dist(counts))
counts = defaultdict(int)
with open('day11.input.txt') as f:
INPUT = f.read().strip()
dir_list = INPUT.split(',')
max_d = -1
for dir in dir_list:
counts[dir] += 1
max_d = max(max_d, dist(counts))
print('max=', max_d)
<|reserved_special_token_1|>
from collections import defaultdict
# The order of the steps doesn't matter, so the distance
# function is very simple
def dist(counts):
    """Hex-grid distance from the origin given per-direction step counts.

    Opposite directions cancel pairwise (n/s, nw/se, ne/sw); the remaining
    north/south component combines with the larger leftover diagonal.
    """
    net_ns = abs(counts["n"] - counts["s"])
    net_nw = abs(counts["nw"] - counts["se"])
    net_ne = abs(counts["ne"] - counts["sw"])
    return net_ns + max(net_nw, net_ne)
if __name__ == "__main__":
    # Part 1: final distance — step order is irrelevant, only the per-direction
    # counts matter, so tally each direction and measure once.
    counts = defaultdict(int)
    with open("day11.input.txt") as f:
        INPUT = f.read().strip()
    for step in INPUT.split(","):
        counts[step] += 1
    print(dist(counts))

    # Part 2: furthest distance ever reached while walking the path — keep
    # running counts and re-measure after every step.
    counts = defaultdict(int)
    with open("day11.input.txt") as f:
        INPUT = f.read().strip()
    max_d = -1
    for step in INPUT.split(","):
        counts[step] += 1
        max_d = max(max_d, dist(counts))
    print("max=", max_d)
|
flexible
|
{
"blob_id": "ac2e9145e3345e5448683d684b69d2356e3214ce",
"index": 9999,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef dist(counts):\n n = abs(counts['n'] - counts['s'])\n nw = abs(counts['nw'] - counts['se'])\n ne = abs(counts['ne'] - counts['sw'])\n return n + max(ne, nw)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef dist(counts):\n n = abs(counts['n'] - counts['s'])\n nw = abs(counts['nw'] - counts['se'])\n ne = abs(counts['ne'] - counts['sw'])\n return n + max(ne, nw)\n\n\nif __name__ == '__main__':\n counts = defaultdict(int)\n with open('day11.input.txt') as f:\n INPUT = f.read().strip()\n dir_list = INPUT.split(',')\n for dir in dir_list:\n counts[dir] += 1\n print(dist(counts))\n counts = defaultdict(int)\n with open('day11.input.txt') as f:\n INPUT = f.read().strip()\n dir_list = INPUT.split(',')\n max_d = -1\n for dir in dir_list:\n counts[dir] += 1\n max_d = max(max_d, dist(counts))\n print('max=', max_d)\n",
"step-4": "from collections import defaultdict\n\n\ndef dist(counts):\n n = abs(counts['n'] - counts['s'])\n nw = abs(counts['nw'] - counts['se'])\n ne = abs(counts['ne'] - counts['sw'])\n return n + max(ne, nw)\n\n\nif __name__ == '__main__':\n counts = defaultdict(int)\n with open('day11.input.txt') as f:\n INPUT = f.read().strip()\n dir_list = INPUT.split(',')\n for dir in dir_list:\n counts[dir] += 1\n print(dist(counts))\n counts = defaultdict(int)\n with open('day11.input.txt') as f:\n INPUT = f.read().strip()\n dir_list = INPUT.split(',')\n max_d = -1\n for dir in dir_list:\n counts[dir] += 1\n max_d = max(max_d, dist(counts))\n print('max=', max_d)\n",
"step-5": "from collections import defaultdict\n\n# The order of the steps doesn't matter, so the distance\n# function is very simple\ndef dist(counts):\n n = abs(counts[\"n\"] - counts[\"s\"])\n nw = abs(counts[\"nw\"] - counts[\"se\"])\n ne = abs(counts[\"ne\"] - counts[\"sw\"])\n return n + max(ne,nw)\n\nif __name__ == \"__main__\":\n counts = defaultdict(int)\n with open(\"day11.input.txt\") as f:\n INPUT = f.read().strip()\n dir_list = INPUT.split(\",\")\n # The order of the steps doesn't matter so we just need\n # to count each type of step\n for dir in dir_list:\n counts[dir] += 1\n\n print(dist(counts))\n\n counts = defaultdict(int)\n with open(\"day11.input.txt\") as f:\n INPUT = f.read().strip()\n dir_list = INPUT.split(\",\")\n # print(dir_list)\n max_d = -1\n for dir in dir_list:\n # Keep running counts and check for distance at every\n # step to find max\n counts[dir] += 1\n max_d = max(max_d,dist(counts))\n print(\"max=\", max_d)\n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('orders', '0005_alter_orderitem_price')]
operations = [migrations.AddField(model_name='order', name=
'being_delivered', field=models.BooleanField(default=False)),
migrations.AddField(model_name='order', name='payment_id', field=
models.CharField(blank=True, max_length=150)), migrations.AddField(
model_name='order', name='ref_code', field=models.CharField(blank=
True, max_length=20, null=True))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('orders', '0005_alter_orderitem_price')]
operations = [migrations.AddField(model_name='order', name=
'being_delivered', field=models.BooleanField(default=False)),
migrations.AddField(model_name='order', name='payment_id', field=
models.CharField(blank=True, max_length=150)), migrations.AddField(
model_name='order', name='ref_code', field=models.CharField(blank=
True, max_length=20, null=True))]
<|reserved_special_token_1|>
# Generated by Django 3.2 on 2021-05-03 17:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds delivery/payment tracking fields to the Order model.

    dependencies = [
        ('orders', '0005_alter_orderitem_price'),
    ]

    operations = [
        # Whether the order has been handed off for delivery.
        migrations.AddField(
            model_name='order',
            name='being_delivered',
            field=models.BooleanField(default=False),
        ),
        # Payment transaction id; blank until a payment is recorded.
        migrations.AddField(
            model_name='order',
            name='payment_id',
            field=models.CharField(blank=True, max_length=150),
        ),
        # Short reference code for the order; optional (blank/null allowed).
        migrations.AddField(
            model_name='order',
            name='ref_code',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
    ]
|
flexible
|
{
"blob_id": "f3b466dc5b6149be82b096791ca8445faf169380",
"index": 5216,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('orders', '0005_alter_orderitem_price')]\n operations = [migrations.AddField(model_name='order', name=\n 'being_delivered', field=models.BooleanField(default=False)),\n migrations.AddField(model_name='order', name='payment_id', field=\n models.CharField(blank=True, max_length=150)), migrations.AddField(\n model_name='order', name='ref_code', field=models.CharField(blank=\n True, max_length=20, null=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('orders', '0005_alter_orderitem_price')]\n operations = [migrations.AddField(model_name='order', name=\n 'being_delivered', field=models.BooleanField(default=False)),\n migrations.AddField(model_name='order', name='payment_id', field=\n models.CharField(blank=True, max_length=150)), migrations.AddField(\n model_name='order', name='ref_code', field=models.CharField(blank=\n True, max_length=20, null=True))]\n",
"step-5": "# Generated by Django 3.2 on 2021-05-03 17:13\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('orders', '0005_alter_orderitem_price'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='being_delivered',\n field=models.BooleanField(default=False),\n ),\n migrations.AddField(\n model_name='order',\n name='payment_id',\n field=models.CharField(blank=True, max_length=150),\n ),\n migrations.AddField(\n model_name='order',\n name='ref_code',\n field=models.CharField(blank=True, max_length=20, null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.subplot(1, 2, 1)
for ls in mls:
plt.plot(*ls.xy)
plt.plot(*p.boundary.xy, '-.k')
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.subplot(1, 2, 2)
for ls in results:
plt.plot(*ls.xy)
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
mls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])
p = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])
results = mls.intersection(p)
plt.subplot(1, 2, 1)
for ls in mls:
plt.plot(*ls.xy)
plt.plot(*p.boundary.xy, '-.k')
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.subplot(1, 2, 2)
for ls in results:
plt.plot(*ls.xy)
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.show()
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
from shapely.geometry import MultiLineString, Polygon
mls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])
p = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])
results = mls.intersection(p)
plt.subplot(1, 2, 1)
for ls in mls:
plt.plot(*ls.xy)
plt.plot(*p.boundary.xy, '-.k')
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.subplot(1, 2, 2)
for ls in results:
plt.plot(*ls.xy)
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.show()
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
from shapely.geometry import MultiLineString, Polygon

# Two crossing lines, clipped by an axis-aligned box.
mls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])
p = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])
results = mls.intersection(p)

# Left panel: the original lines and the clipping polygon outline.
plt.subplot(1, 2, 1)
# BUGFIX: Shapely 2.x removed direct iteration over multi-part geometries;
# iterate the .geoms sequence instead.
for ls in mls.geoms:
    plt.plot(*ls.xy)
plt.plot(*p.boundary.xy, "-.k")
plt.xlim([0, 5])
plt.ylim([0, 2])

# Right panel: the clipped segments.
plt.subplot(1, 2, 2)
# intersection() may return a single geometry or a multi-part one; handle both.
for ls in getattr(results, "geoms", [results]):
    plt.plot(*ls.xy)
plt.xlim([0, 5])
plt.ylim([0, 2])

plt.show()
|
flexible
|
{
"blob_id": "9096ed4b68d2bef92df7db98589e744ddf3efad0",
"index": 350,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.subplot(1, 2, 1)\nfor ls in mls:\n plt.plot(*ls.xy)\nplt.plot(*p.boundary.xy, '-.k')\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.subplot(1, 2, 2)\nfor ls in results:\n plt.plot(*ls.xy)\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.show()\n",
"step-3": "<mask token>\nmls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])\np = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])\nresults = mls.intersection(p)\nplt.subplot(1, 2, 1)\nfor ls in mls:\n plt.plot(*ls.xy)\nplt.plot(*p.boundary.xy, '-.k')\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.subplot(1, 2, 2)\nfor ls in results:\n plt.plot(*ls.xy)\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nfrom shapely.geometry import MultiLineString, Polygon\nmls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])\np = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])\nresults = mls.intersection(p)\nplt.subplot(1, 2, 1)\nfor ls in mls:\n plt.plot(*ls.xy)\nplt.plot(*p.boundary.xy, '-.k')\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.subplot(1, 2, 2)\nfor ls in results:\n plt.plot(*ls.xy)\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.show()\n",
"step-5": "import matplotlib.pyplot as plt\nfrom shapely.geometry import MultiLineString, Polygon\n\nmls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])\np = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])\nresults = mls.intersection(p)\n\nplt.subplot(1, 2, 1)\nfor ls in mls:\n plt.plot(*ls.xy)\nplt.plot(*p.boundary.xy, \"-.k\")\nplt.xlim([0, 5])\nplt.ylim([0, 2])\n\nplt.subplot(1, 2, 2)\nfor ls in results:\n plt.plot(*ls.xy)\nplt.xlim([0, 5])\nplt.ylim([0, 2])\n\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import numpy as np
def weight_init(layers):
    """Initialize BatchNorm1d layers to identity and Linear layers uniformly.

    Linear weights are drawn from U(-1/sqrt(fan_in), 1/sqrt(fan_in)) and the
    bias is zeroed; layers of any other type are left untouched.
    """
    for module in layers:
        if isinstance(module, nn.BatchNorm1d):
            module.weight.data.fill_(1)
            module.bias.data.zero_()
        elif isinstance(module, nn.Linear):
            bound = 1.0 / np.sqrt(module.in_features)
            module.weight.data.uniform_(-bound, bound)
            module.bias.data.fill_(0)
# Traditional click-through-rate (CTR) prediction models
class LR(nn.Module):
def __init__(self,
feature_nums,
output_dim = 1):
super(LR, self).__init__()
self.linear = nn.Linear(feature_nums, output_dim)
self.bias = nn.Parameter(torch.zeros((output_dim,)))
def forward(self, x):
"""
:param x: Int tensor of size (batch_size, feature_nums, latent_nums)
:return: pctrs
"""
out = self.bias + torch.sum(self.linear(x), dim=1)
return out.unsqueeze(1)
class RNN(nn.Module):
    """LSTM encoder with a per-timestep linear projection head."""

    def __init__(self, feature_nums, hidden_dims, bi_lstm, out_dims=1):
        super(RNN, self).__init__()
        self.feature_nums = feature_nums  # input feature dimension
        self.hidden_dims = hidden_dims    # hidden-state dimension
        # NOTE(review): despite the name, this value is passed to nn.LSTM as
        # the number of stacked layers; attribute name kept for compatibility.
        self.bi_lism = bi_lstm
        self.lstm = nn.LSTM(self.feature_nums, self.hidden_dims, self.bi_lism)
        self.out = nn.Linear(self.hidden_dims, out_dims)

    def forward(self, x):
        hidden_seq, _ = self.lstm(x)
        steps, batch, width = hidden_seq.shape
        projected = self.out(hidden_seq.reshape(-1, width))
        return projected.view(steps, batch, -1)
class MLP(nn.Module):
    """Multi-layer perceptron: [Linear -> ReLU -> Dropout] per hidden width,
    followed by a final linear output layer.

    Bug fix: the ``dropout_rate`` constructor argument was previously stored
    but ignored — dropout was hard-coded to p=0.2. It is now applied.
    """

    def __init__(self, feature_nums, neuron_nums, dropout_rate, output_dim=1):
        """
        :param feature_nums: input feature dimension
        :param neuron_nums: iterable of hidden-layer widths
        :param dropout_rate: dropout probability applied after each ReLU
        :param output_dim: width of the final linear layer
        """
        super(MLP, self).__init__()
        self.feature_nums = feature_nums
        self.neuron_nums = neuron_nums
        self.dropout_rate = dropout_rate

        layers = []
        in_dims = self.feature_nums
        for width in self.neuron_nums:
            layers.append(nn.Linear(in_dims, width))
            layers.append(nn.ReLU())
            # Honor the configured rate instead of the hard-coded 0.2.
            layers.append(nn.Dropout(p=self.dropout_rate))
            in_dims = width
        # Custom init covers the hidden stack only; the output layer below
        # keeps PyTorch's default init, matching the original behavior.
        weight_init(layers)
        layers.append(nn.Linear(in_dims, output_dim))
        self.mlp = nn.Sequential(*layers)

    def forward(self, x):
        """
        :param x: Int tensor of size (batch_size, feature_nums, latent_nums)
        :return: pctrs
        """
        return self.mlp(x)
|
normal
|
{
"blob_id": "2c2b075f9ea9e8d6559e44ad09d3e7767c48205e",
"index": 6772,
"step-1": "<mask token>\n\n\nclass LR(nn.Module):\n <mask token>\n <mask token>\n\n\nclass RNN(nn.Module):\n\n def __init__(self, feature_nums, hidden_dims, bi_lstm, out_dims=1):\n super(RNN, self).__init__()\n self.feature_nums = feature_nums\n self.hidden_dims = hidden_dims\n self.bi_lism = bi_lstm\n self.lstm = nn.LSTM(self.feature_nums, self.hidden_dims, self.bi_lism)\n self.out = nn.Linear(self.hidden_dims, out_dims)\n\n def forward(self, x):\n x1, _ = self.lstm(x)\n a, b, c = x1.shape\n out = self.out(x1.view(-1, c))\n out1 = out.view(a, b, -1)\n return out1\n\n\nclass MLP(nn.Module):\n\n def __init__(self, feature_nums, neuron_nums, dropout_rate, output_dim=1):\n super(MLP, self).__init__()\n self.feature_nums = feature_nums\n self.neuron_nums = neuron_nums\n self.dropout_rate = dropout_rate\n deep_input_dims = self.feature_nums\n layers = list()\n neuron_nums = self.neuron_nums\n for neuron_num in neuron_nums:\n layers.append(nn.Linear(deep_input_dims, neuron_num))\n layers.append(nn.ReLU())\n layers.append(nn.Dropout(p=0.2))\n deep_input_dims = neuron_num\n weight_init(layers)\n layers.append(nn.Linear(deep_input_dims, output_dim))\n self.mlp = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.mlp(x)\n return out\n",
"step-2": "<mask token>\n\n\nclass LR(nn.Module):\n\n def __init__(self, feature_nums, output_dim=1):\n super(LR, self).__init__()\n self.linear = nn.Linear(feature_nums, output_dim)\n self.bias = nn.Parameter(torch.zeros((output_dim,)))\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.bias + torch.sum(self.linear(x), dim=1)\n return out.unsqueeze(1)\n\n\nclass RNN(nn.Module):\n\n def __init__(self, feature_nums, hidden_dims, bi_lstm, out_dims=1):\n super(RNN, self).__init__()\n self.feature_nums = feature_nums\n self.hidden_dims = hidden_dims\n self.bi_lism = bi_lstm\n self.lstm = nn.LSTM(self.feature_nums, self.hidden_dims, self.bi_lism)\n self.out = nn.Linear(self.hidden_dims, out_dims)\n\n def forward(self, x):\n x1, _ = self.lstm(x)\n a, b, c = x1.shape\n out = self.out(x1.view(-1, c))\n out1 = out.view(a, b, -1)\n return out1\n\n\nclass MLP(nn.Module):\n\n def __init__(self, feature_nums, neuron_nums, dropout_rate, output_dim=1):\n super(MLP, self).__init__()\n self.feature_nums = feature_nums\n self.neuron_nums = neuron_nums\n self.dropout_rate = dropout_rate\n deep_input_dims = self.feature_nums\n layers = list()\n neuron_nums = self.neuron_nums\n for neuron_num in neuron_nums:\n layers.append(nn.Linear(deep_input_dims, neuron_num))\n layers.append(nn.ReLU())\n layers.append(nn.Dropout(p=0.2))\n deep_input_dims = neuron_num\n weight_init(layers)\n layers.append(nn.Linear(deep_input_dims, output_dim))\n self.mlp = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.mlp(x)\n return out\n",
"step-3": "<mask token>\n\n\ndef weight_init(layers):\n for layer in layers:\n if isinstance(layer, nn.BatchNorm1d):\n layer.weight.data.fill_(1)\n layer.bias.data.zero_()\n elif isinstance(layer, nn.Linear):\n n = layer.in_features\n y = 1.0 / np.sqrt(n)\n layer.weight.data.uniform_(-y, y)\n layer.bias.data.fill_(0)\n\n\nclass LR(nn.Module):\n\n def __init__(self, feature_nums, output_dim=1):\n super(LR, self).__init__()\n self.linear = nn.Linear(feature_nums, output_dim)\n self.bias = nn.Parameter(torch.zeros((output_dim,)))\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.bias + torch.sum(self.linear(x), dim=1)\n return out.unsqueeze(1)\n\n\nclass RNN(nn.Module):\n\n def __init__(self, feature_nums, hidden_dims, bi_lstm, out_dims=1):\n super(RNN, self).__init__()\n self.feature_nums = feature_nums\n self.hidden_dims = hidden_dims\n self.bi_lism = bi_lstm\n self.lstm = nn.LSTM(self.feature_nums, self.hidden_dims, self.bi_lism)\n self.out = nn.Linear(self.hidden_dims, out_dims)\n\n def forward(self, x):\n x1, _ = self.lstm(x)\n a, b, c = x1.shape\n out = self.out(x1.view(-1, c))\n out1 = out.view(a, b, -1)\n return out1\n\n\nclass MLP(nn.Module):\n\n def __init__(self, feature_nums, neuron_nums, dropout_rate, output_dim=1):\n super(MLP, self).__init__()\n self.feature_nums = feature_nums\n self.neuron_nums = neuron_nums\n self.dropout_rate = dropout_rate\n deep_input_dims = self.feature_nums\n layers = list()\n neuron_nums = self.neuron_nums\n for neuron_num in neuron_nums:\n layers.append(nn.Linear(deep_input_dims, neuron_num))\n layers.append(nn.ReLU())\n layers.append(nn.Dropout(p=0.2))\n deep_input_dims = neuron_num\n weight_init(layers)\n layers.append(nn.Linear(deep_input_dims, output_dim))\n self.mlp = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = 
self.mlp(x)\n return out\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data\nimport numpy as np\n\n\ndef weight_init(layers):\n for layer in layers:\n if isinstance(layer, nn.BatchNorm1d):\n layer.weight.data.fill_(1)\n layer.bias.data.zero_()\n elif isinstance(layer, nn.Linear):\n n = layer.in_features\n y = 1.0 / np.sqrt(n)\n layer.weight.data.uniform_(-y, y)\n layer.bias.data.fill_(0)\n\n\nclass LR(nn.Module):\n\n def __init__(self, feature_nums, output_dim=1):\n super(LR, self).__init__()\n self.linear = nn.Linear(feature_nums, output_dim)\n self.bias = nn.Parameter(torch.zeros((output_dim,)))\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.bias + torch.sum(self.linear(x), dim=1)\n return out.unsqueeze(1)\n\n\nclass RNN(nn.Module):\n\n def __init__(self, feature_nums, hidden_dims, bi_lstm, out_dims=1):\n super(RNN, self).__init__()\n self.feature_nums = feature_nums\n self.hidden_dims = hidden_dims\n self.bi_lism = bi_lstm\n self.lstm = nn.LSTM(self.feature_nums, self.hidden_dims, self.bi_lism)\n self.out = nn.Linear(self.hidden_dims, out_dims)\n\n def forward(self, x):\n x1, _ = self.lstm(x)\n a, b, c = x1.shape\n out = self.out(x1.view(-1, c))\n out1 = out.view(a, b, -1)\n return out1\n\n\nclass MLP(nn.Module):\n\n def __init__(self, feature_nums, neuron_nums, dropout_rate, output_dim=1):\n super(MLP, self).__init__()\n self.feature_nums = feature_nums\n self.neuron_nums = neuron_nums\n self.dropout_rate = dropout_rate\n deep_input_dims = self.feature_nums\n layers = list()\n neuron_nums = self.neuron_nums\n for neuron_num in neuron_nums:\n layers.append(nn.Linear(deep_input_dims, neuron_num))\n layers.append(nn.ReLU())\n layers.append(nn.Dropout(p=0.2))\n deep_input_dims = neuron_num\n weight_init(layers)\n layers.append(nn.Linear(deep_input_dims, output_dim))\n self.mlp = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"\n 
:param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.mlp(x)\n return out\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data\n\nimport numpy as np\n\ndef weight_init(layers):\n for layer in layers:\n if isinstance(layer, nn.BatchNorm1d):\n layer.weight.data.fill_(1)\n layer.bias.data.zero_()\n elif isinstance(layer, nn.Linear):\n n = layer.in_features\n y = 1.0 / np.sqrt(n)\n layer.weight.data.uniform_(-y, y)\n layer.bias.data.fill_(0)\n # nn.init.kaiming_normal_(layer.weight.data, nonlinearity='relu')\n\n# 传统的预测点击率模型\nclass LR(nn.Module):\n def __init__(self,\n feature_nums,\n output_dim = 1):\n super(LR, self).__init__()\n self.linear = nn.Linear(feature_nums, output_dim)\n\n self.bias = nn.Parameter(torch.zeros((output_dim,)))\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.bias + torch.sum(self.linear(x), dim=1)\n\n return out.unsqueeze(1)\n\n\nclass RNN(nn.Module):\n def __init__(self,\n feature_nums,\n hidden_dims,\n bi_lstm,\n out_dims=1):\n super(RNN, self).__init__()\n self.feature_nums = feature_nums # 输入数据特征维度\n self.hidden_dims = hidden_dims # 隐藏层维度\n self.bi_lism = bi_lstm # LSTM串联数量\n\n self.lstm = nn.LSTM(self.feature_nums, self.hidden_dims, self.bi_lism)\n self.out = nn.Linear(self.hidden_dims, out_dims)\n\n def forward(self,x):\n x1, _ = self.lstm(x)\n a, b, c = x1.shape\n out = self.out(x1.view(-1, c))\n out1 = out.view(a, b, -1)\n\n return out1\n\nclass MLP(nn.Module):\n def __init__(self,\n feature_nums,\n neuron_nums,\n dropout_rate,\n output_dim=1):\n super(MLP, self).__init__()\n self.feature_nums = feature_nums\n self.neuron_nums = neuron_nums\n self.dropout_rate = dropout_rate\n\n deep_input_dims = self.feature_nums\n\n layers = list()\n\n neuron_nums = self.neuron_nums\n for neuron_num in neuron_nums:\n layers.append(nn.Linear(deep_input_dims, neuron_num))\n # layers.append(nn.BatchNorm1d(neuron_num))\n layers.append(nn.ReLU())\n layers.append(nn.Dropout(p=0.2))\n 
deep_input_dims = neuron_num\n\n weight_init(layers)\n\n layers.append(nn.Linear(deep_input_dims, output_dim))\n\n self.mlp = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"\n :param x: Int tensor of size (batch_size, feature_nums, latent_nums)\n :return: pctrs\n \"\"\"\n out = self.mlp(x)\n\n return out",
"step-ids": [
7,
9,
10,
11,
12
]
}
|
[
7,
9,
10,
11,
12
] |
# Read two real numbers as text; convert to float when adding.
num1 = input("첫 번째 실수 : ")
num2 = input("두 번째 실수 : ")
total = float(num1) + float(num2)
print(total)

# Same task, converting to float at input time instead.
num1 = float(input("첫 번째 실수 : "))
num2 = float(input("두 번째 실수 : "))
print(num1 + num2)
|
normal
|
{
"blob_id": "ee8bf681adcb07c4f79245c8f118131bbcabd2fa",
"index": 7920,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(float(num1) + float(num2))\n<mask token>\nprint(num1 + num2)\n",
"step-3": "num1 = input('첫 번째 실수 : ')\nnum2 = input('두 번째 실수 : ')\nprint(float(num1) + float(num2))\nnum1 = float(input('첫 번째 실수 : '))\nnum2 = float(input('두 번째 실수 : '))\nprint(num1 + num2)\n",
"step-4": "num1 = input(\"첫 번째 실수 : \")\r\nnum2 = input(\"두 번째 실수 : \")\r\n\r\nprint(float(num1) + float(num2))\r\n\r\nnum1 = float(input(\"첫 번째 실수 : \"))\r\nnum2 = float(input(\"두 번째 실수 : \"))\r\n\r\nprint(num1 + num2)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class FashionbertEvaluator(transformers.BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.im_to_embedding = torch.nn.Linear(2048, 768)
self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size,
eps=config.layer_norm_eps)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n
):
"""
INPUTS:
input_ids [1, 448]
embeds: [1, 512, 768]
att_mask: [1, 448]
embeds_n: list with 100 of [1, 512, 768]
att_mask_n: list with 100 of [1, 448]
"""
query_dict_scores = []
query_scores = []
query_labels = []
score_pos = self.get_scores_and_metrics(embeds=embeds.to(device),
attention_mask=att_mask.to(device), labels=input_ids.to(device),
is_paired=torch.tensor(True).to(device), only_alignment=True)
score_p = score_pos[0].squeeze()
score_p = score_p[1].detach().item()
score_pos_dict = {'text': input_ids, 'score': score_p, 'label': True}
query_dict_scores.append(score_pos_dict)
query_scores.append(score_p)
query_labels.append(True)
for n in range(len(embeds_n)):
score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(
device), attention_mask=att_mask_n[n].to(device), labels=
input_ids.to(device), is_paired=torch.tensor(False).to(
device), only_alignment=True)
score_n = score_neg[0].squeeze()
score_n = score_n[1].detach().item()
score_neg_dict = {'text': input_ids, 'score': score_n, 'label':
False}
query_dict_scores.append(score_neg_dict)
query_scores.append(score_n)
query_labels.append(False)
S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key
=lambda x: x[0], reverse=True)]
return S
def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,
input_ids_n, embeds_n, att_mask_n):
"""
INPUTS:
input_ids_p : [1, 448]
embeds_p: [1, 512, 768]
att_mask_p: [1, 448]
input_ids_n: list with 100 of [1, 448]
embeds_n: list with 100 of [1, 512, 768]
att_mask_n: list with 100 of [1, 448]
"""
query_dict_scores = []
query_scores = []
query_labels = []
score_pos = self.get_scores_and_metrics(embeds=embeds_p.to(device),
attention_mask=att_mask_p.to(device), labels=input_ids_p.to(
device), is_paired=torch.tensor(True).to(device),
only_alignment=True)
score_p = score_pos[0].squeeze()
score_p = score_p[1].detach().item()
score_pos_dict = {'text': input_ids_p, 'score': score_p, 'label': True}
query_dict_scores.append(score_pos_dict)
query_scores.append(score_p)
query_labels.append(True)
for n in range(len(embeds_n)):
score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(
device), attention_mask=att_mask_n[n].to(device), labels=
input_ids_n[n].to(device), is_paired=torch.tensor(False).to
(device), only_alignment=True)
score_n = score_neg[0].squeeze()
score_n = score_n[1].detach().item()
score_neg_dict = {'text': input_ids_n[n], 'score': score_n,
'label': False}
query_dict_scores.append(score_neg_dict)
query_scores.append(score_n)
query_labels.append(False)
S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key
=lambda x: x[0], reverse=True)]
return S
def rank_at_K(self, dict_scores, img2text=True):
logs = ''
if img2text:
l1 = '------ Image 2 Text ------\n'
logs += l1
print(l1)
else:
l2 = '------ Text 2 Image ------\n'
print(l2)
Ks = [1, 5, 10]
for K in Ks:
found = 0
for key, val in dict_scores.items():
tmp_range = K if K < len(val) else len(val)
for i in range(tmp_range):
score, label = val[i]
if label:
found += 1
break
l3 = '------ Rank @ {} = {} ------\n'.format(K, found / len(
dict_scores.keys()))
logs += l3
print(l3)
return logs
def get_scores_and_metrics(self, embeds, attention_mask, labels=None,
is_paired=None, only_alignment=False):
batch_size = embeds.shape[0]
seq_length = embeds.shape[1]
hidden_dim = embeds.shape[2]
embeds = embeds.to(device)
attention_mask = attention_mask.to(device)
outputs = self.bert(inputs_embeds=embeds, attention_mask=
attention_mask, return_dict=True)
sequence_output = outputs.last_hidden_state
pooler_output = outputs.pooler_output
text_output = sequence_output[:, :labels.shape[1], :]
image_output = sequence_output[:, labels.shape[1]:, :]
prediction_scores, alignment_scores = self.cls(text_output,
pooler_output)
if only_alignment:
return alignment_scores, is_paired
text_evaluator = {'text_pred_logits': prediction_scores,
'text_labels': labels}
alignment_evaluator = {'alignment_logits': alignment_scores,
'alignment_labels': is_paired}
text_acc, alig_acc = self.accuracy_scores(text_evaluator,
alignment_evaluator)
return text_acc, alig_acc
def accuracy_scores(self, text_evaluator, alignment_evaluator):
"""
Text evaluator: dictionary with preds and labels (aligned)
Image evaluator: dictionary with image output and image patches (aligned)
"""
text_pred_logits = text_evaluator['text_pred_logits']
text_labels = text_evaluator['text_labels']
text_preds_logits = text_pred_logits.detach().cpu().numpy()
text_labels = text_labels.cpu().numpy().flatten()
text_preds = np.argmax(text_preds_logits, axis=2).flatten()
alig_pred_logits = alignment_evaluator['alignment_logits']
alig_labels = alignment_evaluator['alignment_labels']
alig_pred_logits = alig_pred_logits.detach().cpu().numpy()
alig_labels = np.asarray([alig_labels])
alig_preds = np.argmax(alig_pred_logits, axis=1).flatten()
text_acc = accuracy_score(text_labels, text_preds)
alig_acc = accuracy_score(alig_labels, alig_preds)
return text_acc, alig_acc
def image2text(patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches):
"""
image2text retrieval:
Query = Image
Paired with: 1 positive text, 100 negative texts
"""
im_seq_len = patches.shape[1]
bs = input_ids.shape[0]
len_neg_inputs = neg_input_ids.shape[1]
embeds = construct_bert_input(patches, input_ids, evaluator, device=
device, random_patches=random_patches)
attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -
input_ids.shape[1]), value=1)
all_embeds_neg = []
all_att_mask = []
all_neg_inputs = []
for j in range(len_neg_inputs):
neg_input_id_sample = neg_input_ids[:, j, :]
neg_attention_mask_sample = neg_attention_mask[:, j, :]
embeds_neg = construct_bert_input(patches, neg_input_id_sample,
evaluator, device=device, random_patches=random_patches)
attention_mask_neg = F.pad(neg_attention_mask_sample, (0,
embeds_neg.shape[1] - neg_input_id_sample.shape[1]), value=1)
all_embeds_neg.append(embeds_neg)
all_att_mask.append(attention_mask_neg)
all_neg_inputs.append(neg_input_id_sample.detach())
all_scores_query = evaluator.img2text_scores(input_ids_p=input_ids,
embeds_p=embeds, att_mask_p=attention_mask_mm, input_ids_n=
all_neg_inputs, embeds_n=all_embeds_neg, att_mask_n=all_att_mask)
txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,
attention_mask_mm, labels=input_ids, is_paired=is_paired,
only_alignment=False)
return all_scores_query, txt_acc, alig_acc
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FashionbertEvaluator(transformers.BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.im_to_embedding = torch.nn.Linear(2048, 768)
self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size,
eps=config.layer_norm_eps)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n
):
"""
INPUTS:
input_ids [1, 448]
embeds: [1, 512, 768]
att_mask: [1, 448]
embeds_n: list with 100 of [1, 512, 768]
att_mask_n: list with 100 of [1, 448]
"""
query_dict_scores = []
query_scores = []
query_labels = []
score_pos = self.get_scores_and_metrics(embeds=embeds.to(device),
attention_mask=att_mask.to(device), labels=input_ids.to(device),
is_paired=torch.tensor(True).to(device), only_alignment=True)
score_p = score_pos[0].squeeze()
score_p = score_p[1].detach().item()
score_pos_dict = {'text': input_ids, 'score': score_p, 'label': True}
query_dict_scores.append(score_pos_dict)
query_scores.append(score_p)
query_labels.append(True)
for n in range(len(embeds_n)):
score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(
device), attention_mask=att_mask_n[n].to(device), labels=
input_ids.to(device), is_paired=torch.tensor(False).to(
device), only_alignment=True)
score_n = score_neg[0].squeeze()
score_n = score_n[1].detach().item()
score_neg_dict = {'text': input_ids, 'score': score_n, 'label':
False}
query_dict_scores.append(score_neg_dict)
query_scores.append(score_n)
query_labels.append(False)
S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key
=lambda x: x[0], reverse=True)]
return S
def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,
input_ids_n, embeds_n, att_mask_n):
"""
INPUTS:
input_ids_p : [1, 448]
embeds_p: [1, 512, 768]
att_mask_p: [1, 448]
input_ids_n: list with 100 of [1, 448]
embeds_n: list with 100 of [1, 512, 768]
att_mask_n: list with 100 of [1, 448]
"""
query_dict_scores = []
query_scores = []
query_labels = []
score_pos = self.get_scores_and_metrics(embeds=embeds_p.to(device),
attention_mask=att_mask_p.to(device), labels=input_ids_p.to(
device), is_paired=torch.tensor(True).to(device),
only_alignment=True)
score_p = score_pos[0].squeeze()
score_p = score_p[1].detach().item()
score_pos_dict = {'text': input_ids_p, 'score': score_p, 'label': True}
query_dict_scores.append(score_pos_dict)
query_scores.append(score_p)
query_labels.append(True)
for n in range(len(embeds_n)):
score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(
device), attention_mask=att_mask_n[n].to(device), labels=
input_ids_n[n].to(device), is_paired=torch.tensor(False).to
(device), only_alignment=True)
score_n = score_neg[0].squeeze()
score_n = score_n[1].detach().item()
score_neg_dict = {'text': input_ids_n[n], 'score': score_n,
'label': False}
query_dict_scores.append(score_neg_dict)
query_scores.append(score_n)
query_labels.append(False)
S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key
=lambda x: x[0], reverse=True)]
return S
def rank_at_K(self, dict_scores, img2text=True):
logs = ''
if img2text:
l1 = '------ Image 2 Text ------\n'
logs += l1
print(l1)
else:
l2 = '------ Text 2 Image ------\n'
print(l2)
Ks = [1, 5, 10]
for K in Ks:
found = 0
for key, val in dict_scores.items():
tmp_range = K if K < len(val) else len(val)
for i in range(tmp_range):
score, label = val[i]
if label:
found += 1
break
l3 = '------ Rank @ {} = {} ------\n'.format(K, found / len(
dict_scores.keys()))
logs += l3
print(l3)
return logs
def get_scores_and_metrics(self, embeds, attention_mask, labels=None,
is_paired=None, only_alignment=False):
batch_size = embeds.shape[0]
seq_length = embeds.shape[1]
hidden_dim = embeds.shape[2]
embeds = embeds.to(device)
attention_mask = attention_mask.to(device)
outputs = self.bert(inputs_embeds=embeds, attention_mask=
attention_mask, return_dict=True)
sequence_output = outputs.last_hidden_state
pooler_output = outputs.pooler_output
text_output = sequence_output[:, :labels.shape[1], :]
image_output = sequence_output[:, labels.shape[1]:, :]
prediction_scores, alignment_scores = self.cls(text_output,
pooler_output)
if only_alignment:
return alignment_scores, is_paired
text_evaluator = {'text_pred_logits': prediction_scores,
'text_labels': labels}
alignment_evaluator = {'alignment_logits': alignment_scores,
'alignment_labels': is_paired}
text_acc, alig_acc = self.accuracy_scores(text_evaluator,
alignment_evaluator)
return text_acc, alig_acc
def accuracy_scores(self, text_evaluator, alignment_evaluator):
"""
Text evaluator: dictionary with preds and labels (aligned)
Image evaluator: dictionary with image output and image patches (aligned)
"""
text_pred_logits = text_evaluator['text_pred_logits']
text_labels = text_evaluator['text_labels']
text_preds_logits = text_pred_logits.detach().cpu().numpy()
text_labels = text_labels.cpu().numpy().flatten()
text_preds = np.argmax(text_preds_logits, axis=2).flatten()
alig_pred_logits = alignment_evaluator['alignment_logits']
alig_labels = alignment_evaluator['alignment_labels']
alig_pred_logits = alig_pred_logits.detach().cpu().numpy()
alig_labels = np.asarray([alig_labels])
alig_preds = np.argmax(alig_pred_logits, axis=1).flatten()
text_acc = accuracy_score(text_labels, text_preds)
alig_acc = accuracy_score(alig_labels, alig_preds)
return text_acc, alig_acc
def image2text(patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches):
"""
image2text retrieval:
Query = Image
Paired with: 1 positive text, 100 negative texts
"""
im_seq_len = patches.shape[1]
bs = input_ids.shape[0]
len_neg_inputs = neg_input_ids.shape[1]
embeds = construct_bert_input(patches, input_ids, evaluator, device=
device, random_patches=random_patches)
attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -
input_ids.shape[1]), value=1)
all_embeds_neg = []
all_att_mask = []
all_neg_inputs = []
for j in range(len_neg_inputs):
neg_input_id_sample = neg_input_ids[:, j, :]
neg_attention_mask_sample = neg_attention_mask[:, j, :]
embeds_neg = construct_bert_input(patches, neg_input_id_sample,
evaluator, device=device, random_patches=random_patches)
attention_mask_neg = F.pad(neg_attention_mask_sample, (0,
embeds_neg.shape[1] - neg_input_id_sample.shape[1]), value=1)
all_embeds_neg.append(embeds_neg)
all_att_mask.append(attention_mask_neg)
all_neg_inputs.append(neg_input_id_sample.detach())
all_scores_query = evaluator.img2text_scores(input_ids_p=input_ids,
embeds_p=embeds, att_mask_p=attention_mask_mm, input_ids_n=
all_neg_inputs, embeds_n=all_embeds_neg, att_mask_n=all_att_mask)
txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,
attention_mask_mm, labels=input_ids, is_paired=is_paired,
only_alignment=False)
return all_scores_query, txt_acc, alig_acc
def text2image(patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches):
"""
text2image retrieval:
Query = Text
Paired with: 1 positive image, 100 negative images
"""
im_seq_len = patches.shape[1]
bs = input_ids.shape[0]
len_neg_inputs = neg_input_ids.shape[1]
embeds = construct_bert_input(patches, input_ids, evaluator, device=
device, random_patches=random_patches)
attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -
input_ids.shape[1]), value=1)
all_embeds_neg = []
all_att_mask = []
for p in range(len_neg_inputs):
neg_patches_sample = neg_patches[:, p, :, :]
embeds_neg = construct_bert_input(neg_patches_sample, input_ids,
evaluator, device=device, random_patches=random_patches)
attention_mask_neg = F.pad(attention_mask, (0, embeds_neg.shape[1] -
input_ids.shape[1]), value=1)
all_embeds_neg.append(embeds_neg)
all_att_mask.append(attention_mask_neg)
all_scores_query = evaluator.text2img_scores(input_ids=input_ids,
embeds=embeds, att_mask=attention_mask_mm, embeds_n=all_embeds_neg,
att_mask_n=all_att_mask)
txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,
attention_mask_mm, labels=input_ids, is_paired=is_paired,
only_alignment=False)
return all_scores_query, txt_acc, alig_acc
def test(dataset, device, save_file_name, pretrained_model=None,
random_patches=False):
torch.cuda.empty_cache()
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle
=False)
if pretrained_model != None:
evaluator = FashionbertEvaluator.from_pretrained(pretrained_model,
return_dict=True)
else:
evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased',
return_dict=True)
evaluator.to(device)
evaluator.eval()
query_dict_im2txt = {}
query_dict_txt2im = {}
running_acc_alignment_im2txt = 0.0
running_acc_pred_im2txt = 0.0
running_acc_alignment_txt2im = 0.0
running_acc_pred_txt2im = 0.0
with torch.no_grad():
for i, (patches, neg_patches, input_ids, attention_mask,
neg_input_ids, neg_attention_mask, img_name) in enumerate(tqdm(
dataloader)):
is_paired = 1.0
im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(
patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches)
running_acc_pred_im2txt += im2txt_pred_acc
running_acc_alignment_im2txt += im2txt_alig_acc
query_dict_im2txt[img_name[0]] = im2txt_query_scores
txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(
patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches)
running_acc_pred_txt2im += txt2im_pred_acc
running_acc_alignment_txt2im += txt2im_alig_acc
query_dict_txt2im[img_name[0]] = txt2im_query_scores
im2txt_test_set_accuracy_pred = running_acc_pred_im2txt / len(dataloader)
im2txt_test_set_accuracy_alig = running_acc_alignment_im2txt / len(
dataloader)
txt2im_test_set_accuracy_pred = running_acc_pred_txt2im / len(dataloader)
txt2im_test_set_accuracy_alig = running_acc_alignment_txt2im / len(
dataloader)
print()
results = ''
log1 = '---- IMAGE 2 TEXT EVALUATIONS ---------------------\n'
log2 = evaluator.rank_at_K(query_dict_im2txt, True)
log3 = '---- Accuracy in token predictions: {} -----\n'.format(
im2txt_test_set_accuracy_pred)
log4 = '---- Accuracy in text-image alignment: {} -----\n'.format(
im2txt_test_set_accuracy_alig)
print(log1)
print(log2)
print(log3)
print(log4)
print()
log5 = '---- TEXT 2 IMAGE EVALUATIONS ---------------------\n'
log6 = evaluator.rank_at_K(query_dict_txt2im, False)
log7 = '---- Accuracy in token predictions: {} -----\n'.format(
txt2im_test_set_accuracy_pred)
log8 = '---- Accuracy in text-image alignment: {} -----\n'.format(
txt2im_test_set_accuracy_alig)
print(log5)
print(log6)
print(log7)
print(log8)
results += log1
results += log2
results += log3
results += log4
results += log5
results += log6
results += log7
results += log8
save_json(save_file_name, results)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class FashionbertEvaluator(transformers.BertPreTrainedModel):
    """FashionBERT wrapper used for image<->text retrieval evaluation.

    Runs a ``BertModel`` over joint text+image embeddings and scores
    text-image alignment (via ``BertPreTrainingHeads``) as well as
    masked-token prediction accuracy.
    """

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        # Projects 2048-dim image patch features into the 768-dim BERT space.
        self.im_to_embedding = torch.nn.Linear(2048, 768)
        self.im_to_embedding_norm = torch.nn.LayerNorm(
            config.hidden_size, eps=config.layer_norm_eps)
        # Heads: masked-LM prediction scores + (aligned / not-aligned) logits.
        self.cls = BertPreTrainingHeads(config)
        self.init_weights()

    def _alignment_confidence(self, embeds, att_mask, labels, is_paired):
        """Return the model's confidence (logit at index 1) that the
        text/image pair encoded in ``embeds`` is aligned."""
        alignment_scores, _ = self.get_scores_and_metrics(
            embeds=embeds.to(device),
            attention_mask=att_mask.to(device),
            labels=labels.to(device),
            is_paired=torch.tensor(is_paired).to(device),
            only_alignment=True,
        )
        # alignment_scores is [1, 2]; index 1 is the "aligned" logit.
        return alignment_scores.squeeze()[1].detach().item()

    def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n):
        """Score one positive image and all negative images for a text query.

        INPUTS:
            input_ids: [1, 448]
            embeds: [1, 512, 768] (positive joint embedding)
            att_mask: attention mask for the positive pair
            embeds_n: list (typically 100) of [1, 512, 768]
            att_mask_n: list (typically 100) of matching attention masks

        Returns a list of (score, is_positive) tuples sorted by score,
        highest confidence first.
        """
        scores = [self._alignment_confidence(embeds, att_mask, input_ids, True)]
        labels = [True]
        for emb_n, mask_n in zip(embeds_n, att_mask_n):
            scores.append(
                self._alignment_confidence(emb_n, mask_n, input_ids, False))
            labels.append(False)
        return sorted(zip(scores, labels), key=lambda x: x[0], reverse=True)

    def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,
                        input_ids_n, embeds_n, att_mask_n):
        """Score one positive text and all negative texts for an image query.

        INPUTS:
            input_ids_p: [1, 448]
            embeds_p: [1, 512, 768] (positive joint embedding)
            att_mask_p: attention mask for the positive pair
            input_ids_n: list (typically 100) of [1, 448]
            embeds_n: list (typically 100) of [1, 512, 768]
            att_mask_n: list (typically 100) of matching attention masks

        Returns a list of (score, is_positive) tuples sorted by score,
        highest confidence first.
        """
        scores = [self._alignment_confidence(
            embeds_p, att_mask_p, input_ids_p, True)]
        labels = [True]
        for ids_n, emb_n, mask_n in zip(input_ids_n, embeds_n, att_mask_n):
            scores.append(
                self._alignment_confidence(emb_n, mask_n, ids_n, False))
            labels.append(False)
        return sorted(zip(scores, labels), key=lambda x: x[0], reverse=True)

    def rank_at_K(self, dict_scores, img2text=True):
        """Compute Rank@K (K in {1, 5, 10}) over per-query sorted score lists.

        ``dict_scores`` maps a query id to a score-sorted list of
        (score, is_positive) tuples. A query counts as "found" at K when
        the positive candidate appears in the top K entries.
        Returns the accumulated log string (also printed).
        """
        logs = ''
        header = ('------ Image 2 Text ------\n' if img2text
                  else '------ Text 2 Image ------\n')
        # BUG FIX: the original accumulated the header into ``logs`` only
        # for the img2text branch; the Text 2 Image header was printed
        # but missing from the saved results.
        logs += header
        print(header)
        for K in (1, 5, 10):
            found = 0
            for val in dict_scores.values():
                for i in range(min(K, len(val))):
                    _score, label = val[i]
                    if label:
                        found += 1
                        break
            line = '------ Rank @ {} = {} ------\n'.format(
                K, found / len(dict_scores.keys()))
            logs += line
            print(line)
        return logs

    def get_scores_and_metrics(self, embeds, attention_mask, labels=None,
                               is_paired=None, only_alignment=False):
        """Run BERT over the joint embedding.

        When ``only_alignment`` is True, return the raw alignment logits
        and ``is_paired`` unchanged; otherwise return (token prediction
        accuracy, alignment accuracy) for the pair.
        """
        embeds = embeds.to(device)
        attention_mask = attention_mask.to(device)
        outputs = self.bert(inputs_embeds=embeds,
                            attention_mask=attention_mask,
                            return_dict=True)
        sequence_output = outputs.last_hidden_state  # [batch, seq_len, hidden]
        pooler_output = outputs.pooler_output        # [batch, hidden]
        # Hidden states for the text portion only (first labels.shape[1]
        # positions); the remaining positions belong to the image patches.
        text_output = sequence_output[:, :labels.shape[1], :]
        # prediction_scores: [batch, text_len, vocab]; alignment_scores: [batch, 2]
        prediction_scores, alignment_scores = self.cls(text_output,
                                                       pooler_output)
        if only_alignment:
            return alignment_scores, is_paired
        text_evaluator = {'text_pred_logits': prediction_scores,
                          'text_labels': labels}
        alignment_evaluator = {'alignment_logits': alignment_scores,
                               'alignment_labels': is_paired}
        return self.accuracy_scores(text_evaluator, alignment_evaluator)

    def accuracy_scores(self, text_evaluator, alignment_evaluator):
        """
        Text evaluator: dictionary with preds and labels (aligned)
        Image evaluator: dictionary with image output and image patches (aligned)
        """
        text_logits = text_evaluator['text_pred_logits'].detach().cpu().numpy()
        text_labels = text_evaluator['text_labels'].cpu().numpy().flatten()
        text_preds = np.argmax(text_logits, axis=2).flatten()
        alig_logits = (alignment_evaluator['alignment_logits']
                       .detach().cpu().numpy())
        alig_labels = np.asarray([alignment_evaluator['alignment_labels']])
        alig_preds = np.argmax(alig_logits, axis=1).flatten()
        text_acc = accuracy_score(text_labels, text_preds)
        alig_acc = accuracy_score(alig_labels, alig_preds)
        return text_acc, alig_acc
def image2text(patches, neg_patches, input_ids, is_paired, attention_mask,
               neg_input_ids, neg_attention_mask, evaluator, random_patches):
    """image2text retrieval: the query is an image, candidates are
    1 positive text and N (typically 100) negative texts.

    Returns (sorted (score, label) list, token accuracy, alignment accuracy),
    where the accuracies are computed on the positive pair only.
    """
    num_negatives = neg_input_ids.shape[1]
    # Joint embedding for the positive (image, text) pair; the attention
    # mask is padded with 1s to cover the appended image-patch positions.
    embeds = construct_bert_input(patches, input_ids, evaluator,
                                  device=device,
                                  random_patches=random_patches)
    attention_mask_mm = F.pad(
        attention_mask, (0, embeds.shape[1] - input_ids.shape[1]), value=1)
    all_embeds_neg = []
    all_att_mask = []
    all_neg_inputs = []
    for j in range(num_negatives):
        neg_ids = neg_input_ids[:, j, :]
        neg_mask = neg_attention_mask[:, j, :]
        embeds_neg = construct_bert_input(patches, neg_ids, evaluator,
                                          device=device,
                                          random_patches=random_patches)
        all_embeds_neg.append(embeds_neg)
        all_att_mask.append(F.pad(
            neg_mask, (0, embeds_neg.shape[1] - neg_ids.shape[1]), value=1))
        all_neg_inputs.append(neg_ids.detach())
    all_scores_query = evaluator.img2text_scores(
        input_ids_p=input_ids,
        embeds_p=embeds,
        att_mask_p=attention_mask_mm,
        input_ids_n=all_neg_inputs,
        embeds_n=all_embeds_neg,
        att_mask_n=all_att_mask)
    # Accuracy metrics use the positive example only.
    txt_acc, alig_acc = evaluator.get_scores_and_metrics(
        embeds, attention_mask_mm, labels=input_ids, is_paired=is_paired,
        only_alignment=False)
    return all_scores_query, txt_acc, alig_acc
def text2image(patches, neg_patches, input_ids, is_paired, attention_mask,
               neg_input_ids, neg_attention_mask, evaluator, random_patches):
    """text2image retrieval: the query is a text, candidates are
    1 positive image and N (typically 100) negative images.

    Returns (sorted (score, label) list, token accuracy, alignment accuracy),
    where the accuracies are computed on the positive pair only.
    """
    num_negatives = neg_input_ids.shape[1]
    # Joint embedding for the positive (image, text) pair; the attention
    # mask is padded with 1s to cover the appended image-patch positions.
    embeds = construct_bert_input(patches, input_ids, evaluator,
                                  device=device,
                                  random_patches=random_patches)
    attention_mask_mm = F.pad(
        attention_mask, (0, embeds.shape[1] - input_ids.shape[1]), value=1)
    all_embeds_neg = []
    all_att_mask = []
    for p in range(num_negatives):
        neg_patches_sample = neg_patches[:, p, :, :]
        embeds_neg = construct_bert_input(neg_patches_sample, input_ids,
                                          evaluator, device=device,
                                          random_patches=random_patches)
        all_embeds_neg.append(embeds_neg)
        all_att_mask.append(F.pad(
            attention_mask, (0, embeds_neg.shape[1] - input_ids.shape[1]),
            value=1))
    all_scores_query = evaluator.text2img_scores(
        input_ids=input_ids,
        embeds=embeds,
        att_mask=attention_mask_mm,
        embeds_n=all_embeds_neg,
        att_mask_n=all_att_mask)
    # Accuracy metrics use the positive example only.
    txt_acc, alig_acc = evaluator.get_scores_and_metrics(
        embeds, attention_mask_mm, labels=input_ids, is_paired=is_paired,
        only_alignment=False)
    return all_scores_query, txt_acc, alig_acc
def test(dataset, device, save_file_name, pretrained_model=None,
         random_patches=False):
    """Evaluate a (pre)trained FashionBERT on image<->text retrieval.

    Iterates the dataset one query at a time, scores each query against
    its positive and negative candidates in both directions, then prints
    and saves Rank@K plus token/alignment accuracies to ``save_file_name``.
    """
    torch.cuda.empty_cache()
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                             shuffle=False)
    # Idiom fix: identity comparison with None, not `!= None`.
    if pretrained_model is not None:
        evaluator = FashionbertEvaluator.from_pretrained(pretrained_model,
                                                         return_dict=True)
    else:
        evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased',
                                                         return_dict=True)
    evaluator.to(device)
    evaluator.eval()
    query_dict_im2txt = {}
    query_dict_txt2im = {}
    running_acc_alignment_im2txt = 0.0
    running_acc_pred_im2txt = 0.0
    running_acc_alignment_txt2im = 0.0
    running_acc_pred_txt2im = 0.0
    with torch.no_grad():
        for (patches, neg_patches, input_ids, attention_mask,
             neg_input_ids, neg_attention_mask, img_name) in tqdm(dataloader):
            # Every dataloader sample is an aligned (positive) pair.
            is_paired = 1.0
            im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(
                patches, neg_patches, input_ids, is_paired, attention_mask,
                neg_input_ids, neg_attention_mask, evaluator, random_patches)
            running_acc_pred_im2txt += im2txt_pred_acc
            running_acc_alignment_im2txt += im2txt_alig_acc
            query_dict_im2txt[img_name[0]] = im2txt_query_scores
            txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(
                patches, neg_patches, input_ids, is_paired, attention_mask,
                neg_input_ids, neg_attention_mask, evaluator, random_patches)
            running_acc_pred_txt2im += txt2im_pred_acc
            running_acc_alignment_txt2im += txt2im_alig_acc
            query_dict_txt2im[img_name[0]] = txt2im_query_scores
    n_batches = len(dataloader)
    print()
    # Accumulate log lines in a list and join once instead of repeated
    # string concatenation.
    im2txt_logs = [
        '---- IMAGE 2 TEXT EVALUATIONS ---------------------\n',
        evaluator.rank_at_K(query_dict_im2txt, True),
        '---- Accuracy in token predictions: {} -----\n'.format(
            running_acc_pred_im2txt / n_batches),
        '---- Accuracy in text-image alignment: {} -----\n'.format(
            running_acc_alignment_im2txt / n_batches),
    ]
    for log in im2txt_logs:
        print(log)
    print()
    txt2im_logs = [
        '---- TEXT 2 IMAGE EVALUATIONS ---------------------\n',
        evaluator.rank_at_K(query_dict_txt2im, False),
        '---- Accuracy in token predictions: {} -----\n'.format(
            running_acc_pred_txt2im / n_batches),
        '---- Accuracy in text-image alignment: {} -----\n'.format(
            running_acc_alignment_txt2im / n_batches),
    ]
    for log in txt2im_logs:
        print(log)
    save_json(save_file_name, ''.join(im2txt_logs + txt2im_logs))
if __name__ == '__main__':
    def _str2bool(value):
        """argparse helper: interpret 'True'/'1'/'yes' strings as True."""
        if isinstance(value, bool):
            return value
        return value.lower() in ('true', '1', 'yes')

    parser = argparse.ArgumentParser(description='Evaluate FashionBert')
    parser.add_argument('--path_to_train_dataset', help=
        'Absolute path to .pkl file used for training')
    parser.add_argument('--path_to_pretrained_model', help=
        'Path to pretrained model', default=None)
    parser.add_argument('--save_test_set', help=
        'Name to save test set .pkl', default='test_set.pkl')
    parser.add_argument('--save_results_name', help=
        'Name to save file with results', default='results.json')
    # BUG FIX: the original had default=False with no type, so any CLI
    # value — including the string "False" — evaluated as truthy.
    parser.add_argument('--random_patches', type=_str2bool, default=False,
                        help='using random_patches True or False')
    args = parser.parse_args()
    print('Processing the dataset...')
    dataset = EvaluationDataset(args.path_to_train_dataset)
    print('Done!')
    print('\nGetting aligned pairs...')
    get_all_paired_test_set(dataset, args.save_test_set, num_samples=1000)
    print('Loading dataset...')
    dataset = Evaluation_negpairs(args.save_test_set)
    print('Starting evaluation...')
    test(dataset, device, args.save_results_name, pretrained_model=args.
        path_to_pretrained_model, random_patches=args.random_patches)
    print('Done!!!')
<|reserved_special_token_1|>
import torch, torchvision
import torch.nn.functional as F
import transformers
from transformers import BertTokenizer, BertModel
from transformers.models.bert.modeling_bert import BertPreTrainingHeads
from utils import construct_bert_input, EvaluationDataset, save_json
from fashionbert_evaluator_parser import Evaluation_negpairs, get_all_paired_test_set
import argparse
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
# Run on the first CUDA device when available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class FashionbertEvaluator(transformers.BertPreTrainedModel):
    """FashionBERT wrapper used for image<->text retrieval evaluation.

    Runs a ``BertModel`` over joint text+image embeddings and scores
    text-image alignment (via ``BertPreTrainingHeads``) as well as
    masked-token prediction accuracy.
    """

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        # Projects 2048-dim image patch features into the 768-dim BERT space.
        self.im_to_embedding = torch.nn.Linear(2048, 768)
        self.im_to_embedding_norm = torch.nn.LayerNorm(
            config.hidden_size, eps=config.layer_norm_eps)
        # Heads: masked-LM prediction scores + (aligned / not-aligned) logits.
        self.cls = BertPreTrainingHeads(config)
        self.init_weights()

    def _alignment_confidence(self, embeds, att_mask, labels, is_paired):
        """Return the model's confidence (logit at index 1) that the
        text/image pair encoded in ``embeds`` is aligned."""
        alignment_scores, _ = self.get_scores_and_metrics(
            embeds=embeds.to(device),
            attention_mask=att_mask.to(device),
            labels=labels.to(device),
            is_paired=torch.tensor(is_paired).to(device),
            only_alignment=True,
        )
        # alignment_scores is [1, 2]; index 1 is the "aligned" logit.
        return alignment_scores.squeeze()[1].detach().item()

    def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n):
        """Score one positive image and all negative images for a text query.

        INPUTS:
            input_ids: [1, 448]
            embeds: [1, 512, 768] (positive joint embedding)
            att_mask: attention mask for the positive pair
            embeds_n: list (typically 100) of [1, 512, 768]
            att_mask_n: list (typically 100) of matching attention masks

        Returns a list of (score, is_positive) tuples sorted by score,
        highest confidence first.
        """
        scores = [self._alignment_confidence(embeds, att_mask, input_ids, True)]
        labels = [True]
        for emb_n, mask_n in zip(embeds_n, att_mask_n):
            scores.append(
                self._alignment_confidence(emb_n, mask_n, input_ids, False))
            labels.append(False)
        return sorted(zip(scores, labels), key=lambda x: x[0], reverse=True)

    def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,
                        input_ids_n, embeds_n, att_mask_n):
        """Score one positive text and all negative texts for an image query.

        INPUTS:
            input_ids_p: [1, 448]
            embeds_p: [1, 512, 768] (positive joint embedding)
            att_mask_p: attention mask for the positive pair
            input_ids_n: list (typically 100) of [1, 448]
            embeds_n: list (typically 100) of [1, 512, 768]
            att_mask_n: list (typically 100) of matching attention masks

        Returns a list of (score, is_positive) tuples sorted by score,
        highest confidence first.
        """
        scores = [self._alignment_confidence(
            embeds_p, att_mask_p, input_ids_p, True)]
        labels = [True]
        for ids_n, emb_n, mask_n in zip(input_ids_n, embeds_n, att_mask_n):
            scores.append(
                self._alignment_confidence(emb_n, mask_n, ids_n, False))
            labels.append(False)
        return sorted(zip(scores, labels), key=lambda x: x[0], reverse=True)

    def rank_at_K(self, dict_scores, img2text=True):
        """Compute Rank@K (K in {1, 5, 10}) over per-query sorted score lists.

        ``dict_scores`` maps a query id to a score-sorted list of
        (score, is_positive) tuples. A query counts as "found" at K when
        the positive candidate appears in the top K entries.
        Returns the accumulated log string (also printed).
        """
        logs = ''
        header = ('------ Image 2 Text ------\n' if img2text
                  else '------ Text 2 Image ------\n')
        # BUG FIX: the original accumulated the header into ``logs`` only
        # for the img2text branch; the Text 2 Image header was printed
        # but missing from the saved results.
        logs += header
        print(header)
        for K in (1, 5, 10):
            found = 0
            for val in dict_scores.values():
                for i in range(min(K, len(val))):
                    _score, label = val[i]
                    if label:
                        found += 1
                        break
            line = '------ Rank @ {} = {} ------\n'.format(
                K, found / len(dict_scores.keys()))
            logs += line
            print(line)
        return logs

    def get_scores_and_metrics(self, embeds, attention_mask, labels=None,
                               is_paired=None, only_alignment=False):
        """Run BERT over the joint embedding.

        When ``only_alignment`` is True, return the raw alignment logits
        and ``is_paired`` unchanged; otherwise return (token prediction
        accuracy, alignment accuracy) for the pair.
        """
        embeds = embeds.to(device)
        attention_mask = attention_mask.to(device)
        outputs = self.bert(inputs_embeds=embeds,
                            attention_mask=attention_mask,
                            return_dict=True)
        sequence_output = outputs.last_hidden_state  # [batch, seq_len, hidden]
        pooler_output = outputs.pooler_output        # [batch, hidden]
        # Hidden states for the text portion only (first labels.shape[1]
        # positions); the remaining positions belong to the image patches.
        text_output = sequence_output[:, :labels.shape[1], :]
        # prediction_scores: [batch, text_len, vocab]; alignment_scores: [batch, 2]
        prediction_scores, alignment_scores = self.cls(text_output,
                                                       pooler_output)
        if only_alignment:
            return alignment_scores, is_paired
        text_evaluator = {'text_pred_logits': prediction_scores,
                          'text_labels': labels}
        alignment_evaluator = {'alignment_logits': alignment_scores,
                               'alignment_labels': is_paired}
        return self.accuracy_scores(text_evaluator, alignment_evaluator)

    def accuracy_scores(self, text_evaluator, alignment_evaluator):
        """
        Text evaluator: dictionary with preds and labels (aligned)
        Image evaluator: dictionary with image output and image patches (aligned)
        """
        text_logits = text_evaluator['text_pred_logits'].detach().cpu().numpy()
        text_labels = text_evaluator['text_labels'].cpu().numpy().flatten()
        text_preds = np.argmax(text_logits, axis=2).flatten()
        alig_logits = (alignment_evaluator['alignment_logits']
                       .detach().cpu().numpy())
        alig_labels = np.asarray([alignment_evaluator['alignment_labels']])
        alig_preds = np.argmax(alig_logits, axis=1).flatten()
        text_acc = accuracy_score(text_labels, text_preds)
        alig_acc = accuracy_score(alig_labels, alig_preds)
        return text_acc, alig_acc
def image2text(patches, neg_patches, input_ids, is_paired, attention_mask,
               neg_input_ids, neg_attention_mask, evaluator, random_patches):
    """image2text retrieval: the query is an image, candidates are
    1 positive text and N (typically 100) negative texts.

    Returns (sorted (score, label) list, token accuracy, alignment accuracy),
    where the accuracies are computed on the positive pair only.
    """
    num_negatives = neg_input_ids.shape[1]
    # Joint embedding for the positive (image, text) pair; the attention
    # mask is padded with 1s to cover the appended image-patch positions.
    embeds = construct_bert_input(patches, input_ids, evaluator,
                                  device=device,
                                  random_patches=random_patches)
    attention_mask_mm = F.pad(
        attention_mask, (0, embeds.shape[1] - input_ids.shape[1]), value=1)
    all_embeds_neg = []
    all_att_mask = []
    all_neg_inputs = []
    for j in range(num_negatives):
        neg_ids = neg_input_ids[:, j, :]
        neg_mask = neg_attention_mask[:, j, :]
        embeds_neg = construct_bert_input(patches, neg_ids, evaluator,
                                          device=device,
                                          random_patches=random_patches)
        all_embeds_neg.append(embeds_neg)
        all_att_mask.append(F.pad(
            neg_mask, (0, embeds_neg.shape[1] - neg_ids.shape[1]), value=1))
        all_neg_inputs.append(neg_ids.detach())
    all_scores_query = evaluator.img2text_scores(
        input_ids_p=input_ids,
        embeds_p=embeds,
        att_mask_p=attention_mask_mm,
        input_ids_n=all_neg_inputs,
        embeds_n=all_embeds_neg,
        att_mask_n=all_att_mask)
    # Accuracy metrics use the positive example only.
    txt_acc, alig_acc = evaluator.get_scores_and_metrics(
        embeds, attention_mask_mm, labels=input_ids, is_paired=is_paired,
        only_alignment=False)
    return all_scores_query, txt_acc, alig_acc
def text2image(patches, neg_patches, input_ids, is_paired, attention_mask,
               neg_input_ids, neg_attention_mask, evaluator, random_patches):
    """text2image retrieval: the query is a text, candidates are
    1 positive image and N (typically 100) negative images.

    Returns (sorted (score, label) list, token accuracy, alignment accuracy),
    where the accuracies are computed on the positive pair only.
    """
    num_negatives = neg_input_ids.shape[1]
    # Joint embedding for the positive (image, text) pair; the attention
    # mask is padded with 1s to cover the appended image-patch positions.
    embeds = construct_bert_input(patches, input_ids, evaluator,
                                  device=device,
                                  random_patches=random_patches)
    attention_mask_mm = F.pad(
        attention_mask, (0, embeds.shape[1] - input_ids.shape[1]), value=1)
    all_embeds_neg = []
    all_att_mask = []
    for p in range(num_negatives):
        neg_patches_sample = neg_patches[:, p, :, :]
        embeds_neg = construct_bert_input(neg_patches_sample, input_ids,
                                          evaluator, device=device,
                                          random_patches=random_patches)
        all_embeds_neg.append(embeds_neg)
        all_att_mask.append(F.pad(
            attention_mask, (0, embeds_neg.shape[1] - input_ids.shape[1]),
            value=1))
    all_scores_query = evaluator.text2img_scores(
        input_ids=input_ids,
        embeds=embeds,
        att_mask=attention_mask_mm,
        embeds_n=all_embeds_neg,
        att_mask_n=all_att_mask)
    # Accuracy metrics use the positive example only.
    txt_acc, alig_acc = evaluator.get_scores_and_metrics(
        embeds, attention_mask_mm, labels=input_ids, is_paired=is_paired,
        only_alignment=False)
    return all_scores_query, txt_acc, alig_acc
def test(dataset, device, save_file_name, pretrained_model=None,
         random_patches=False):
    """Evaluate a (pre)trained FashionBERT on image<->text retrieval.

    Iterates the dataset one query at a time, scores each query against
    its positive and negative candidates in both directions, then prints
    and saves Rank@K plus token/alignment accuracies to ``save_file_name``.
    """
    torch.cuda.empty_cache()
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                             shuffle=False)
    # Idiom fix: identity comparison with None, not `!= None`.
    if pretrained_model is not None:
        evaluator = FashionbertEvaluator.from_pretrained(pretrained_model,
                                                         return_dict=True)
    else:
        evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased',
                                                         return_dict=True)
    evaluator.to(device)
    evaluator.eval()
    query_dict_im2txt = {}
    query_dict_txt2im = {}
    running_acc_alignment_im2txt = 0.0
    running_acc_pred_im2txt = 0.0
    running_acc_alignment_txt2im = 0.0
    running_acc_pred_txt2im = 0.0
    with torch.no_grad():
        for (patches, neg_patches, input_ids, attention_mask,
             neg_input_ids, neg_attention_mask, img_name) in tqdm(dataloader):
            # Every dataloader sample is an aligned (positive) pair.
            is_paired = 1.0
            im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(
                patches, neg_patches, input_ids, is_paired, attention_mask,
                neg_input_ids, neg_attention_mask, evaluator, random_patches)
            running_acc_pred_im2txt += im2txt_pred_acc
            running_acc_alignment_im2txt += im2txt_alig_acc
            query_dict_im2txt[img_name[0]] = im2txt_query_scores
            txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(
                patches, neg_patches, input_ids, is_paired, attention_mask,
                neg_input_ids, neg_attention_mask, evaluator, random_patches)
            running_acc_pred_txt2im += txt2im_pred_acc
            running_acc_alignment_txt2im += txt2im_alig_acc
            query_dict_txt2im[img_name[0]] = txt2im_query_scores
    n_batches = len(dataloader)
    print()
    # Accumulate log lines in a list and join once instead of repeated
    # string concatenation.
    im2txt_logs = [
        '---- IMAGE 2 TEXT EVALUATIONS ---------------------\n',
        evaluator.rank_at_K(query_dict_im2txt, True),
        '---- Accuracy in token predictions: {} -----\n'.format(
            running_acc_pred_im2txt / n_batches),
        '---- Accuracy in text-image alignment: {} -----\n'.format(
            running_acc_alignment_im2txt / n_batches),
    ]
    for log in im2txt_logs:
        print(log)
    print()
    txt2im_logs = [
        '---- TEXT 2 IMAGE EVALUATIONS ---------------------\n',
        evaluator.rank_at_K(query_dict_txt2im, False),
        '---- Accuracy in token predictions: {} -----\n'.format(
            running_acc_pred_txt2im / n_batches),
        '---- Accuracy in text-image alignment: {} -----\n'.format(
            running_acc_alignment_txt2im / n_batches),
    ]
    for log in txt2im_logs:
        print(log)
    save_json(save_file_name, ''.join(im2txt_logs + txt2im_logs))
if __name__ == '__main__':
    def _str2bool(value):
        """argparse helper: interpret 'True'/'1'/'yes' strings as True."""
        if isinstance(value, bool):
            return value
        return value.lower() in ('true', '1', 'yes')

    parser = argparse.ArgumentParser(description='Evaluate FashionBert')
    parser.add_argument('--path_to_train_dataset', help=
        'Absolute path to .pkl file used for training')
    parser.add_argument('--path_to_pretrained_model', help=
        'Path to pretrained model', default=None)
    parser.add_argument('--save_test_set', help=
        'Name to save test set .pkl', default='test_set.pkl')
    parser.add_argument('--save_results_name', help=
        'Name to save file with results', default='results.json')
    # BUG FIX: the original had default=False with no type, so any CLI
    # value — including the string "False" — evaluated as truthy.
    parser.add_argument('--random_patches', type=_str2bool, default=False,
                        help='using random_patches True or False')
    args = parser.parse_args()
    print('Processing the dataset...')
    dataset = EvaluationDataset(args.path_to_train_dataset)
    print('Done!')
    print('\nGetting aligned pairs...')
    get_all_paired_test_set(dataset, args.save_test_set, num_samples=1000)
    print('Loading dataset...')
    dataset = Evaluation_negpairs(args.save_test_set)
    print('Starting evaluation...')
    test(dataset, device, args.save_results_name, pretrained_model=args.
        path_to_pretrained_model, random_patches=args.random_patches)
    print('Done!!!')
<|reserved_special_token_1|>
import torch, torchvision
import torch.nn.functional as F
import transformers
from transformers import BertTokenizer, BertModel
from transformers.models.bert.modeling_bert import BertPreTrainingHeads
from utils import construct_bert_input, EvaluationDataset, save_json
from fashionbert_evaluator_parser import Evaluation_negpairs, get_all_paired_test_set
import argparse
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
# Run on the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class FashionbertEvaluator(transformers.BertPreTrainedModel):
    """FashionBERT wrapper used for image<->text retrieval evaluation.

    Runs a ``BertModel`` over joint text+image embeddings and scores
    text-image alignment (via ``BertPreTrainingHeads``) as well as
    masked-token prediction accuracy.
    """

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        # Projects 2048-dim image patch features into the 768-dim BERT space.
        self.im_to_embedding = torch.nn.Linear(2048, 768)
        self.im_to_embedding_norm = torch.nn.LayerNorm(
            config.hidden_size, eps=config.layer_norm_eps)
        # Heads: masked-LM prediction scores + (aligned / not-aligned) logits.
        self.cls = BertPreTrainingHeads(config)
        self.init_weights()

    def _alignment_confidence(self, embeds, att_mask, labels, is_paired):
        """Return the model's confidence (logit at index 1) that the
        text/image pair encoded in ``embeds`` is aligned."""
        alignment_scores, _ = self.get_scores_and_metrics(
            embeds=embeds.to(device),
            attention_mask=att_mask.to(device),
            labels=labels.to(device),
            is_paired=torch.tensor(is_paired).to(device),
            only_alignment=True,
        )
        # alignment_scores is [1, 2]; index 1 is the "aligned" logit.
        return alignment_scores.squeeze()[1].detach().item()

    def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n):
        """Score one positive image and all negative images for a text query.

        INPUTS:
            input_ids: [1, 448]
            embeds: [1, 512, 768] (positive joint embedding)
            att_mask: attention mask for the positive pair
            embeds_n: list (typically 100) of [1, 512, 768]
            att_mask_n: list (typically 100) of matching attention masks

        Returns a list of (score, is_positive) tuples sorted by score,
        highest confidence first.
        """
        scores = [self._alignment_confidence(embeds, att_mask, input_ids, True)]
        labels = [True]
        for emb_n, mask_n in zip(embeds_n, att_mask_n):
            scores.append(
                self._alignment_confidence(emb_n, mask_n, input_ids, False))
            labels.append(False)
        return sorted(zip(scores, labels), key=lambda x: x[0], reverse=True)

    def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,
                        input_ids_n, embeds_n, att_mask_n):
        """Score one positive text and all negative texts for an image query.

        INPUTS:
            input_ids_p: [1, 448]
            embeds_p: [1, 512, 768] (positive joint embedding)
            att_mask_p: attention mask for the positive pair
            input_ids_n: list (typically 100) of [1, 448]
            embeds_n: list (typically 100) of [1, 512, 768]
            att_mask_n: list (typically 100) of matching attention masks

        Returns a list of (score, is_positive) tuples sorted by score,
        highest confidence first.
        """
        scores = [self._alignment_confidence(
            embeds_p, att_mask_p, input_ids_p, True)]
        labels = [True]
        for ids_n, emb_n, mask_n in zip(input_ids_n, embeds_n, att_mask_n):
            scores.append(
                self._alignment_confidence(emb_n, mask_n, ids_n, False))
            labels.append(False)
        return sorted(zip(scores, labels), key=lambda x: x[0], reverse=True)

    def rank_at_K(self, dict_scores, img2text=True):
        """Compute Rank@K (K in {1, 5, 10}) over per-query sorted score lists.

        ``dict_scores`` maps a query id to a score-sorted list of
        (score, is_positive) tuples. A query counts as "found" at K when
        the positive candidate appears in the top K entries.
        Returns the accumulated log string (also printed).
        """
        logs = ''
        header = ('------ Image 2 Text ------\n' if img2text
                  else '------ Text 2 Image ------\n')
        # BUG FIX: the original accumulated the header into ``logs`` only
        # for the img2text branch; the Text 2 Image header was printed
        # but missing from the saved results.
        logs += header
        print(header)
        for K in (1, 5, 10):
            found = 0
            for val in dict_scores.values():
                for i in range(min(K, len(val))):
                    _score, label = val[i]
                    if label:
                        found += 1
                        break
            line = '------ Rank @ {} = {} ------\n'.format(
                K, found / len(dict_scores.keys()))
            logs += line
            print(line)
        return logs

    def get_scores_and_metrics(self, embeds, attention_mask, labels=None,
                               is_paired=None, only_alignment=False):
        """Run BERT over the joint embedding.

        When ``only_alignment`` is True, return the raw alignment logits
        and ``is_paired`` unchanged; otherwise return (token prediction
        accuracy, alignment accuracy) for the pair.
        """
        embeds = embeds.to(device)
        attention_mask = attention_mask.to(device)
        outputs = self.bert(inputs_embeds=embeds,
                            attention_mask=attention_mask,
                            return_dict=True)
        sequence_output = outputs.last_hidden_state  # [batch, seq_len, hidden]
        pooler_output = outputs.pooler_output        # [batch, hidden]
        # Hidden states for the text portion only (first labels.shape[1]
        # positions); the remaining positions belong to the image patches.
        text_output = sequence_output[:, :labels.shape[1], :]
        # prediction_scores: [batch, text_len, vocab]; alignment_scores: [batch, 2]
        prediction_scores, alignment_scores = self.cls(text_output,
                                                       pooler_output)
        if only_alignment:
            return alignment_scores, is_paired
        text_evaluator = {'text_pred_logits': prediction_scores,
                          'text_labels': labels}
        alignment_evaluator = {'alignment_logits': alignment_scores,
                               'alignment_labels': is_paired}
        return self.accuracy_scores(text_evaluator, alignment_evaluator)

    def accuracy_scores(self, text_evaluator, alignment_evaluator):
        """
        Text evaluator: dictionary with preds and labels (aligned)
        Image evaluator: dictionary with image output and image patches (aligned)
        """
        text_logits = text_evaluator['text_pred_logits'].detach().cpu().numpy()
        text_labels = text_evaluator['text_labels'].cpu().numpy().flatten()
        text_preds = np.argmax(text_logits, axis=2).flatten()
        alig_logits = (alignment_evaluator['alignment_logits']
                       .detach().cpu().numpy())
        alig_labels = np.asarray([alignment_evaluator['alignment_labels']])
        alig_preds = np.argmax(alig_logits, axis=1).flatten()
        text_acc = accuracy_score(text_labels, text_preds)
        alig_acc = accuracy_score(alig_labels, alig_preds)
        return text_acc, alig_acc
def image2text(patches, neg_patches, input_ids, is_paired, attention_mask,
               neg_input_ids, neg_attention_mask, evaluator, random_patches):
    """image2text retrieval: the query is an image, candidates are
    1 positive text and N (typically 100) negative texts.

    Returns (sorted (score, label) list, token accuracy, alignment accuracy),
    where the accuracies are computed on the positive pair only.
    """
    num_negatives = neg_input_ids.shape[1]
    # Joint embedding for the positive (image, text) pair; the attention
    # mask is padded with 1s to cover the appended image-patch positions.
    embeds = construct_bert_input(patches, input_ids, evaluator,
                                  device=device,
                                  random_patches=random_patches)
    attention_mask_mm = F.pad(
        attention_mask, (0, embeds.shape[1] - input_ids.shape[1]), value=1)
    all_embeds_neg = []
    all_att_mask = []
    all_neg_inputs = []
    for j in range(num_negatives):
        neg_ids = neg_input_ids[:, j, :]
        neg_mask = neg_attention_mask[:, j, :]
        embeds_neg = construct_bert_input(patches, neg_ids, evaluator,
                                          device=device,
                                          random_patches=random_patches)
        all_embeds_neg.append(embeds_neg)
        all_att_mask.append(F.pad(
            neg_mask, (0, embeds_neg.shape[1] - neg_ids.shape[1]), value=1))
        all_neg_inputs.append(neg_ids.detach())
    all_scores_query = evaluator.img2text_scores(
        input_ids_p=input_ids,
        embeds_p=embeds,
        att_mask_p=attention_mask_mm,
        input_ids_n=all_neg_inputs,
        embeds_n=all_embeds_neg,
        att_mask_n=all_att_mask)
    # Accuracy metrics use the positive example only.
    txt_acc, alig_acc = evaluator.get_scores_and_metrics(
        embeds, attention_mask_mm, labels=input_ids, is_paired=is_paired,
        only_alignment=False)
    return all_scores_query, txt_acc, alig_acc
def text2image(patches, neg_patches, input_ids, is_paired, attention_mask, neg_input_ids, neg_attention_mask,
               evaluator, random_patches):
    """
    text2image retrieval:
    Query = Text
    Paired with: 1 positive image, N negative images

    Args:
        patches: positive image patch features — assumes
            [batch, im_seq_len, feat_dim] (TODO confirm against dataset).
        neg_patches: negative image patches, [batch, num_neg, im_seq_len, feat_dim].
        input_ids: query caption token ids, [batch, 448].
        is_paired: alignment label for the positive pair.
        attention_mask: attention mask for the query caption, [batch, 448].
        neg_input_ids: only its second dimension is read, to get the
            number of negative samples.
        neg_attention_mask: unused; kept so the signature mirrors image2text.
        evaluator: FashionbertEvaluator used for embedding and scoring.
        random_patches: forwarded to construct_bert_input.

    Returns:
        (all_scores_query, txt_acc, alig_acc): ranked (score, label) pairs
        for this query, plus token-prediction and alignment accuracy on the
        positive pair.
    """
    num_negatives = neg_input_ids.shape[1]
    # POSITIVE IMAGE: joint image+text embedding; pad the text mask with 1s
    # over the appended image positions.
    embeds = construct_bert_input(patches, input_ids, evaluator, device=device, random_patches=random_patches)
    attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] - input_ids.shape[1]), value=1)  # [1, 512]
    # NEGATIVE SAMPLES: the same query caption joined with each negative image.
    all_embeds_neg = []
    all_att_mask = []
    for p in range(num_negatives):
        neg_patches_sample = neg_patches[:, p, :, :]
        embeds_neg = construct_bert_input(neg_patches_sample, input_ids, evaluator, device=device, random_patches=random_patches)
        attention_mask_neg = F.pad(attention_mask, (0, embeds_neg.shape[1] - input_ids.shape[1]), value=1)
        all_embeds_neg.append(embeds_neg)
        all_att_mask.append(attention_mask_neg)
    # Score the positive pair against all negatives for this query.
    all_scores_query = evaluator.text2img_scores(
        input_ids=input_ids,
        embeds=embeds,
        att_mask=attention_mask_mm,
        embeds_n=all_embeds_neg,  # list
        att_mask_n=all_att_mask)  # list
    # Accuracy metrics are computed on the positive pair only.
    txt_acc, alig_acc = evaluator.get_scores_and_metrics(
        embeds,  # text + image embedded
        attention_mask_mm,
        labels=input_ids,  # [batch, 448]
        is_paired=is_paired,  # [batch]
        only_alignment=False,
    )
    return all_scores_query, txt_acc, alig_acc
def test(dataset, device, save_file_name, pretrained_model=None, random_patches=False):
    """Run the full retrieval evaluation over *dataset* and save a text report.

    For every sample, scores both retrieval directions (image2text and
    text2image), accumulates token-prediction and alignment accuracies, then
    prints Rank@K plus accuracy summaries and writes them via save_json.

    Args:
        dataset: evaluation dataset yielding
            (patches, neg_patches, input_ids, attention_mask,
             neg_input_ids, neg_attention_mask, img_name).
        device: torch device the evaluator runs on.
        save_file_name: path passed to save_json for the results text.
        pretrained_model: checkpoint path; falls back to 'bert-base-uncased'
            when None.
        random_patches: forwarded to the retrieval helpers.
    """
    torch.cuda.empty_cache()
    # batch_size must stay 1: downstream code indexes img_name[0] per sample.
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=1,
        shuffle=False,
    )
    if pretrained_model is not None:
        evaluator = FashionbertEvaluator.from_pretrained(pretrained_model, return_dict=True)
    else:
        evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased', return_dict=True)
    evaluator.to(device)
    evaluator.eval()
    query_dict_im2txt = {}
    query_dict_txt2im = {}
    running_acc_alignment_im2txt = 0.0
    running_acc_pred_im2txt = 0.0
    running_acc_alignment_txt2im = 0.0
    running_acc_pred_txt2im = 0.0
    with torch.no_grad():
        for i, (
                patches, neg_patches, input_ids, attention_mask, neg_input_ids, neg_attention_mask, img_name) in enumerate(
            tqdm(dataloader)):
            # ****** Shapes ********
            # input_ids shape: [1, 448]
            # neg_input_ids shape: [1, NUM_SAMPLES=100, 448]
            # neg_patches: [1, NUM_SAMPLES=100, 64, 2048]
            # Every loaded sample is a true pair; negatives are built inside
            # the retrieval helpers.
            is_paired = 1.
            # IMAGE 2 TEXT
            im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(patches, neg_patches, input_ids,
                                                                              is_paired, attention_mask,
                                                                              neg_input_ids, neg_attention_mask,
                                                                              evaluator, random_patches)
            running_acc_pred_im2txt += im2txt_pred_acc
            running_acc_alignment_im2txt += im2txt_alig_acc
            # Keyed by image name for Rank @ K.
            query_dict_im2txt[img_name[0]] = im2txt_query_scores
            # TEXT 2 IMAGE
            txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(patches, neg_patches, input_ids,
                                                                              is_paired, attention_mask,
                                                                              neg_input_ids, neg_attention_mask,
                                                                              evaluator, random_patches)
            running_acc_pred_txt2im += txt2im_pred_acc
            running_acc_alignment_txt2im += txt2im_alig_acc
            query_dict_txt2im[img_name[0]] = txt2im_query_scores
    # Average the running accuracies over the whole evaluation set.
    im2txt_test_set_accuracy_pred = (running_acc_pred_im2txt / len(dataloader))
    im2txt_test_set_accuracy_alig = (running_acc_alignment_im2txt / len(dataloader))
    txt2im_test_set_accuracy_pred = (running_acc_pred_txt2im / len(dataloader))
    txt2im_test_set_accuracy_alig = (running_acc_alignment_txt2im / len(dataloader))
    print()
    # Collect the report lines once; print each and join them for saving.
    im2txt_logs = [
        '---- IMAGE 2 TEXT EVALUATIONS ---------------------\n',
        evaluator.rank_at_K(query_dict_im2txt, True),
        '---- Accuracy in token predictions: {} -----\n'.format(im2txt_test_set_accuracy_pred),
        '---- Accuracy in text-image alignment: {} -----\n'.format(im2txt_test_set_accuracy_alig),
    ]
    for log in im2txt_logs:
        print(log)
    print()
    txt2im_logs = [
        '---- TEXT 2 IMAGE EVALUATIONS ---------------------\n',
        evaluator.rank_at_K(query_dict_txt2im, False),
        '---- Accuracy in token predictions: {} -----\n'.format(txt2im_test_set_accuracy_pred),
        '---- Accuracy in text-image alignment: {} -----\n'.format(txt2im_test_set_accuracy_alig),
    ]
    for log in txt2im_logs:
        print(log)
    results = ''.join(im2txt_logs + txt2im_logs)
    save_json(save_file_name, results)
def _str2bool(value):
    """Interpret common command-line spellings of a boolean flag.

    argparse with a plain ``default=False`` would hand back the raw string,
    so ``--random_patches False`` used to evaluate truthy; this converter
    keeps the CLI backward-compatible while returning a real bool.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ('true', '1', 'yes', 'y')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Evaluate FashionBert')
    parser.add_argument('--path_to_train_dataset', help='Absolute path to .pkl file used for training')
    parser.add_argument('--path_to_pretrained_model', help='Path to pretrained model', default=None)
    parser.add_argument('--save_test_set', help='Name to save test set .pkl', default='test_set.pkl')
    parser.add_argument('--save_results_name', help='Name to save file with results', default='results.json')
    parser.add_argument('--random_patches', help='using random_patches True or False',
                        type=_str2bool, default=False)
    args = parser.parse_args()
    # 1) Build the evaluation sample set from the training pickle.
    print('Processing the dataset...')
    dataset = EvaluationDataset(args.path_to_train_dataset)
    print('Done!')
    print('\nGetting aligned pairs...')
    get_all_paired_test_set(dataset, args.save_test_set, num_samples=1000)
    # 2) Reload the saved pairs with negatives and run the evaluation.
    print('Loading dataset...')
    dataset = Evaluation_negpairs(args.save_test_set)
    print('Starting evaluation...')
    test(dataset, device, args.save_results_name, pretrained_model=args.path_to_pretrained_model, random_patches=args.random_patches)
    print('Done!!!')
|
flexible
|
{
"blob_id": "7a01bffa5d7f0d5ecff57c97478f2cf5e9a27538",
"index": 1210,
"step-1": "<mask token>\n\n\nclass FashionbertEvaluator(transformers.BertPreTrainedModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.bert = BertModel(config)\n self.im_to_embedding = torch.nn.Linear(2048, 768)\n self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size,\n eps=config.layer_norm_eps)\n self.cls = BertPreTrainingHeads(config)\n self.init_weights()\n\n def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n\n ):\n \"\"\"\n INPUTS:\n input_ids [1, 448]\n embeds: [1, 512, 768]\n att_mask: [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds.to(device),\n attention_mask=att_mask.to(device), labels=input_ids.to(device),\n is_paired=torch.tensor(True).to(device), only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids.to(device), is_paired=torch.tensor(False).to(\n device), only_alignment=True)\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids, 'score': score_n, 'label':\n False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,\n input_ids_n, embeds_n, att_mask_n):\n \"\"\"\n INPUTS:\n input_ids_p : [1, 448]\n embeds_p: [1, 512, 768]\n att_mask_p: [1, 448]\n input_ids_n: 
list with 100 of [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds_p.to(device),\n attention_mask=att_mask_p.to(device), labels=input_ids_p.to(\n device), is_paired=torch.tensor(True).to(device),\n only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids_p, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids_n[n].to(device), is_paired=torch.tensor(False).to\n (device), only_alignment=True)\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids_n[n], 'score': score_n,\n 'label': False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def rank_at_K(self, dict_scores, img2text=True):\n logs = ''\n if img2text:\n l1 = '------ Image 2 Text ------\\n'\n logs += l1\n print(l1)\n else:\n l2 = '------ Text 2 Image ------\\n'\n print(l2)\n Ks = [1, 5, 10]\n for K in Ks:\n found = 0\n for key, val in dict_scores.items():\n tmp_range = K if K < len(val) else len(val)\n for i in range(tmp_range):\n score, label = val[i]\n if label:\n found += 1\n break\n l3 = '------ Rank @ {} = {} ------\\n'.format(K, found / len(\n dict_scores.keys()))\n logs += l3\n print(l3)\n return logs\n\n def get_scores_and_metrics(self, embeds, attention_mask, labels=None,\n is_paired=None, only_alignment=False):\n batch_size = embeds.shape[0]\n seq_length = embeds.shape[1]\n 
hidden_dim = embeds.shape[2]\n embeds = embeds.to(device)\n attention_mask = attention_mask.to(device)\n outputs = self.bert(inputs_embeds=embeds, attention_mask=\n attention_mask, return_dict=True)\n sequence_output = outputs.last_hidden_state\n pooler_output = outputs.pooler_output\n text_output = sequence_output[:, :labels.shape[1], :]\n image_output = sequence_output[:, labels.shape[1]:, :]\n prediction_scores, alignment_scores = self.cls(text_output,\n pooler_output)\n if only_alignment:\n return alignment_scores, is_paired\n text_evaluator = {'text_pred_logits': prediction_scores,\n 'text_labels': labels}\n alignment_evaluator = {'alignment_logits': alignment_scores,\n 'alignment_labels': is_paired}\n text_acc, alig_acc = self.accuracy_scores(text_evaluator,\n alignment_evaluator)\n return text_acc, alig_acc\n\n def accuracy_scores(self, text_evaluator, alignment_evaluator):\n \"\"\"\n Text evaluator: dictionary with preds and labels (aligned)\n Image evaluator: dictionary with image output and image patches (aligned)\n \"\"\"\n text_pred_logits = text_evaluator['text_pred_logits']\n text_labels = text_evaluator['text_labels']\n text_preds_logits = text_pred_logits.detach().cpu().numpy()\n text_labels = text_labels.cpu().numpy().flatten()\n text_preds = np.argmax(text_preds_logits, axis=2).flatten()\n alig_pred_logits = alignment_evaluator['alignment_logits']\n alig_labels = alignment_evaluator['alignment_labels']\n alig_pred_logits = alig_pred_logits.detach().cpu().numpy()\n alig_labels = np.asarray([alig_labels])\n alig_preds = np.argmax(alig_pred_logits, axis=1).flatten()\n text_acc = accuracy_score(text_labels, text_preds)\n alig_acc = accuracy_score(alig_labels, alig_preds)\n return text_acc, alig_acc\n\n\ndef image2text(patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches):\n \"\"\"\n image2text retrieval:\n Query = Image\n Paired with: 1 positive text, 100 negative texts\n \"\"\"\n 
im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n embeds = construct_bert_input(patches, input_ids, evaluator, device=\n device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg = []\n all_att_mask = []\n all_neg_inputs = []\n for j in range(len_neg_inputs):\n neg_input_id_sample = neg_input_ids[:, j, :]\n neg_attention_mask_sample = neg_attention_mask[:, j, :]\n embeds_neg = construct_bert_input(patches, neg_input_id_sample,\n evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(neg_attention_mask_sample, (0, \n embeds_neg.shape[1] - neg_input_id_sample.shape[1]), value=1)\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_neg_inputs.append(neg_input_id_sample.detach())\n all_scores_query = evaluator.img2text_scores(input_ids_p=input_ids,\n embeds_p=embeds, att_mask_p=attention_mask_mm, input_ids_n=\n all_neg_inputs, embeds_n=all_embeds_neg, att_mask_n=all_att_mask)\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,\n attention_mask_mm, labels=input_ids, is_paired=is_paired,\n only_alignment=False)\n return all_scores_query, txt_acc, alig_acc\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FashionbertEvaluator(transformers.BertPreTrainedModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.bert = BertModel(config)\n self.im_to_embedding = torch.nn.Linear(2048, 768)\n self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size,\n eps=config.layer_norm_eps)\n self.cls = BertPreTrainingHeads(config)\n self.init_weights()\n\n def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n\n ):\n \"\"\"\n INPUTS:\n input_ids [1, 448]\n embeds: [1, 512, 768]\n att_mask: [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds.to(device),\n attention_mask=att_mask.to(device), labels=input_ids.to(device),\n is_paired=torch.tensor(True).to(device), only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids.to(device), is_paired=torch.tensor(False).to(\n device), only_alignment=True)\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids, 'score': score_n, 'label':\n False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,\n input_ids_n, embeds_n, att_mask_n):\n \"\"\"\n INPUTS:\n input_ids_p : [1, 448]\n embeds_p: [1, 512, 768]\n att_mask_p: [1, 448]\n input_ids_n: 
list with 100 of [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds_p.to(device),\n attention_mask=att_mask_p.to(device), labels=input_ids_p.to(\n device), is_paired=torch.tensor(True).to(device),\n only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids_p, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids_n[n].to(device), is_paired=torch.tensor(False).to\n (device), only_alignment=True)\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids_n[n], 'score': score_n,\n 'label': False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def rank_at_K(self, dict_scores, img2text=True):\n logs = ''\n if img2text:\n l1 = '------ Image 2 Text ------\\n'\n logs += l1\n print(l1)\n else:\n l2 = '------ Text 2 Image ------\\n'\n print(l2)\n Ks = [1, 5, 10]\n for K in Ks:\n found = 0\n for key, val in dict_scores.items():\n tmp_range = K if K < len(val) else len(val)\n for i in range(tmp_range):\n score, label = val[i]\n if label:\n found += 1\n break\n l3 = '------ Rank @ {} = {} ------\\n'.format(K, found / len(\n dict_scores.keys()))\n logs += l3\n print(l3)\n return logs\n\n def get_scores_and_metrics(self, embeds, attention_mask, labels=None,\n is_paired=None, only_alignment=False):\n batch_size = embeds.shape[0]\n seq_length = embeds.shape[1]\n 
hidden_dim = embeds.shape[2]\n embeds = embeds.to(device)\n attention_mask = attention_mask.to(device)\n outputs = self.bert(inputs_embeds=embeds, attention_mask=\n attention_mask, return_dict=True)\n sequence_output = outputs.last_hidden_state\n pooler_output = outputs.pooler_output\n text_output = sequence_output[:, :labels.shape[1], :]\n image_output = sequence_output[:, labels.shape[1]:, :]\n prediction_scores, alignment_scores = self.cls(text_output,\n pooler_output)\n if only_alignment:\n return alignment_scores, is_paired\n text_evaluator = {'text_pred_logits': prediction_scores,\n 'text_labels': labels}\n alignment_evaluator = {'alignment_logits': alignment_scores,\n 'alignment_labels': is_paired}\n text_acc, alig_acc = self.accuracy_scores(text_evaluator,\n alignment_evaluator)\n return text_acc, alig_acc\n\n def accuracy_scores(self, text_evaluator, alignment_evaluator):\n \"\"\"\n Text evaluator: dictionary with preds and labels (aligned)\n Image evaluator: dictionary with image output and image patches (aligned)\n \"\"\"\n text_pred_logits = text_evaluator['text_pred_logits']\n text_labels = text_evaluator['text_labels']\n text_preds_logits = text_pred_logits.detach().cpu().numpy()\n text_labels = text_labels.cpu().numpy().flatten()\n text_preds = np.argmax(text_preds_logits, axis=2).flatten()\n alig_pred_logits = alignment_evaluator['alignment_logits']\n alig_labels = alignment_evaluator['alignment_labels']\n alig_pred_logits = alig_pred_logits.detach().cpu().numpy()\n alig_labels = np.asarray([alig_labels])\n alig_preds = np.argmax(alig_pred_logits, axis=1).flatten()\n text_acc = accuracy_score(text_labels, text_preds)\n alig_acc = accuracy_score(alig_labels, alig_preds)\n return text_acc, alig_acc\n\n\ndef image2text(patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches):\n \"\"\"\n image2text retrieval:\n Query = Image\n Paired with: 1 positive text, 100 negative texts\n \"\"\"\n 
im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n embeds = construct_bert_input(patches, input_ids, evaluator, device=\n device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg = []\n all_att_mask = []\n all_neg_inputs = []\n for j in range(len_neg_inputs):\n neg_input_id_sample = neg_input_ids[:, j, :]\n neg_attention_mask_sample = neg_attention_mask[:, j, :]\n embeds_neg = construct_bert_input(patches, neg_input_id_sample,\n evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(neg_attention_mask_sample, (0, \n embeds_neg.shape[1] - neg_input_id_sample.shape[1]), value=1)\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_neg_inputs.append(neg_input_id_sample.detach())\n all_scores_query = evaluator.img2text_scores(input_ids_p=input_ids,\n embeds_p=embeds, att_mask_p=attention_mask_mm, input_ids_n=\n all_neg_inputs, embeds_n=all_embeds_neg, att_mask_n=all_att_mask)\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,\n attention_mask_mm, labels=input_ids, is_paired=is_paired,\n only_alignment=False)\n return all_scores_query, txt_acc, alig_acc\n\n\ndef text2image(patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches):\n \"\"\"\n text2image retrieval:\n Query = Text\n Paired with: 1 positive image, 100 negative images\n \"\"\"\n im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n embeds = construct_bert_input(patches, input_ids, evaluator, device=\n device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg = []\n all_att_mask = []\n for p in range(len_neg_inputs):\n neg_patches_sample = neg_patches[:, p, :, :]\n embeds_neg = 
construct_bert_input(neg_patches_sample, input_ids,\n evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(attention_mask, (0, embeds_neg.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_scores_query = evaluator.text2img_scores(input_ids=input_ids,\n embeds=embeds, att_mask=attention_mask_mm, embeds_n=all_embeds_neg,\n att_mask_n=all_att_mask)\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,\n attention_mask_mm, labels=input_ids, is_paired=is_paired,\n only_alignment=False)\n return all_scores_query, txt_acc, alig_acc\n\n\ndef test(dataset, device, save_file_name, pretrained_model=None,\n random_patches=False):\n torch.cuda.empty_cache()\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle\n =False)\n if pretrained_model != None:\n evaluator = FashionbertEvaluator.from_pretrained(pretrained_model,\n return_dict=True)\n else:\n evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased',\n return_dict=True)\n evaluator.to(device)\n evaluator.eval()\n query_dict_im2txt = {}\n query_dict_txt2im = {}\n running_acc_alignment_im2txt = 0.0\n running_acc_pred_im2txt = 0.0\n running_acc_alignment_txt2im = 0.0\n running_acc_pred_txt2im = 0.0\n with torch.no_grad():\n for i, (patches, neg_patches, input_ids, attention_mask,\n neg_input_ids, neg_attention_mask, img_name) in enumerate(tqdm(\n dataloader)):\n is_paired = 1.0\n im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(\n patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches)\n running_acc_pred_im2txt += im2txt_pred_acc\n running_acc_alignment_im2txt += im2txt_alig_acc\n query_dict_im2txt[img_name[0]] = im2txt_query_scores\n txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(\n patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, 
neg_attention_mask, evaluator, random_patches)\n running_acc_pred_txt2im += txt2im_pred_acc\n running_acc_alignment_txt2im += txt2im_alig_acc\n query_dict_txt2im[img_name[0]] = txt2im_query_scores\n im2txt_test_set_accuracy_pred = running_acc_pred_im2txt / len(dataloader)\n im2txt_test_set_accuracy_alig = running_acc_alignment_im2txt / len(\n dataloader)\n txt2im_test_set_accuracy_pred = running_acc_pred_txt2im / len(dataloader)\n txt2im_test_set_accuracy_alig = running_acc_alignment_txt2im / len(\n dataloader)\n print()\n results = ''\n log1 = '---- IMAGE 2 TEXT EVALUATIONS ---------------------\\n'\n log2 = evaluator.rank_at_K(query_dict_im2txt, True)\n log3 = '---- Accuracy in token predictions: {} -----\\n'.format(\n im2txt_test_set_accuracy_pred)\n log4 = '---- Accuracy in text-image alignment: {} -----\\n'.format(\n im2txt_test_set_accuracy_alig)\n print(log1)\n print(log2)\n print(log3)\n print(log4)\n print()\n log5 = '---- TEXT 2 IMAGE EVALUATIONS ---------------------\\n'\n log6 = evaluator.rank_at_K(query_dict_txt2im, False)\n log7 = '---- Accuracy in token predictions: {} -----\\n'.format(\n txt2im_test_set_accuracy_pred)\n log8 = '---- Accuracy in text-image alignment: {} -----\\n'.format(\n txt2im_test_set_accuracy_alig)\n print(log5)\n print(log6)\n print(log7)\n print(log8)\n results += log1\n results += log2\n results += log3\n results += log4\n results += log5\n results += log6\n results += log7\n results += log8\n save_json(save_file_name, results)\n\n\n<mask token>\n",
"step-3": "<mask token>\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\nclass FashionbertEvaluator(transformers.BertPreTrainedModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.bert = BertModel(config)\n self.im_to_embedding = torch.nn.Linear(2048, 768)\n self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size,\n eps=config.layer_norm_eps)\n self.cls = BertPreTrainingHeads(config)\n self.init_weights()\n\n def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n\n ):\n \"\"\"\n INPUTS:\n input_ids [1, 448]\n embeds: [1, 512, 768]\n att_mask: [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds.to(device),\n attention_mask=att_mask.to(device), labels=input_ids.to(device),\n is_paired=torch.tensor(True).to(device), only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids.to(device), is_paired=torch.tensor(False).to(\n device), only_alignment=True)\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids, 'score': score_n, 'label':\n False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,\n input_ids_n, embeds_n, att_mask_n):\n \"\"\"\n INPUTS:\n input_ids_p : [1, 
448]\n embeds_p: [1, 512, 768]\n att_mask_p: [1, 448]\n input_ids_n: list with 100 of [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds_p.to(device),\n attention_mask=att_mask_p.to(device), labels=input_ids_p.to(\n device), is_paired=torch.tensor(True).to(device),\n only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids_p, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids_n[n].to(device), is_paired=torch.tensor(False).to\n (device), only_alignment=True)\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids_n[n], 'score': score_n,\n 'label': False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def rank_at_K(self, dict_scores, img2text=True):\n logs = ''\n if img2text:\n l1 = '------ Image 2 Text ------\\n'\n logs += l1\n print(l1)\n else:\n l2 = '------ Text 2 Image ------\\n'\n print(l2)\n Ks = [1, 5, 10]\n for K in Ks:\n found = 0\n for key, val in dict_scores.items():\n tmp_range = K if K < len(val) else len(val)\n for i in range(tmp_range):\n score, label = val[i]\n if label:\n found += 1\n break\n l3 = '------ Rank @ {} = {} ------\\n'.format(K, found / len(\n dict_scores.keys()))\n logs += l3\n print(l3)\n return logs\n\n def get_scores_and_metrics(self, embeds, attention_mask, labels=None,\n is_paired=None, only_alignment=False):\n 
batch_size = embeds.shape[0]\n seq_length = embeds.shape[1]\n hidden_dim = embeds.shape[2]\n embeds = embeds.to(device)\n attention_mask = attention_mask.to(device)\n outputs = self.bert(inputs_embeds=embeds, attention_mask=\n attention_mask, return_dict=True)\n sequence_output = outputs.last_hidden_state\n pooler_output = outputs.pooler_output\n text_output = sequence_output[:, :labels.shape[1], :]\n image_output = sequence_output[:, labels.shape[1]:, :]\n prediction_scores, alignment_scores = self.cls(text_output,\n pooler_output)\n if only_alignment:\n return alignment_scores, is_paired\n text_evaluator = {'text_pred_logits': prediction_scores,\n 'text_labels': labels}\n alignment_evaluator = {'alignment_logits': alignment_scores,\n 'alignment_labels': is_paired}\n text_acc, alig_acc = self.accuracy_scores(text_evaluator,\n alignment_evaluator)\n return text_acc, alig_acc\n\n def accuracy_scores(self, text_evaluator, alignment_evaluator):\n \"\"\"\n Text evaluator: dictionary with preds and labels (aligned)\n Image evaluator: dictionary with image output and image patches (aligned)\n \"\"\"\n text_pred_logits = text_evaluator['text_pred_logits']\n text_labels = text_evaluator['text_labels']\n text_preds_logits = text_pred_logits.detach().cpu().numpy()\n text_labels = text_labels.cpu().numpy().flatten()\n text_preds = np.argmax(text_preds_logits, axis=2).flatten()\n alig_pred_logits = alignment_evaluator['alignment_logits']\n alig_labels = alignment_evaluator['alignment_labels']\n alig_pred_logits = alig_pred_logits.detach().cpu().numpy()\n alig_labels = np.asarray([alig_labels])\n alig_preds = np.argmax(alig_pred_logits, axis=1).flatten()\n text_acc = accuracy_score(text_labels, text_preds)\n alig_acc = accuracy_score(alig_labels, alig_preds)\n return text_acc, alig_acc\n\n\ndef image2text(patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches):\n \"\"\"\n image2text retrieval:\n Query = Image\n 
Paired with: 1 positive text, 100 negative texts\n \"\"\"\n im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n embeds = construct_bert_input(patches, input_ids, evaluator, device=\n device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg = []\n all_att_mask = []\n all_neg_inputs = []\n for j in range(len_neg_inputs):\n neg_input_id_sample = neg_input_ids[:, j, :]\n neg_attention_mask_sample = neg_attention_mask[:, j, :]\n embeds_neg = construct_bert_input(patches, neg_input_id_sample,\n evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(neg_attention_mask_sample, (0, \n embeds_neg.shape[1] - neg_input_id_sample.shape[1]), value=1)\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_neg_inputs.append(neg_input_id_sample.detach())\n all_scores_query = evaluator.img2text_scores(input_ids_p=input_ids,\n embeds_p=embeds, att_mask_p=attention_mask_mm, input_ids_n=\n all_neg_inputs, embeds_n=all_embeds_neg, att_mask_n=all_att_mask)\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,\n attention_mask_mm, labels=input_ids, is_paired=is_paired,\n only_alignment=False)\n return all_scores_query, txt_acc, alig_acc\n\n\ndef text2image(patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches):\n \"\"\"\n text2image retrieval:\n Query = Text\n Paired with: 1 positive image, 100 negative images\n \"\"\"\n im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n embeds = construct_bert_input(patches, input_ids, evaluator, device=\n device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg = []\n all_att_mask = []\n for p in range(len_neg_inputs):\n 
neg_patches_sample = neg_patches[:, p, :, :]\n embeds_neg = construct_bert_input(neg_patches_sample, input_ids,\n evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(attention_mask, (0, embeds_neg.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_scores_query = evaluator.text2img_scores(input_ids=input_ids,\n embeds=embeds, att_mask=attention_mask_mm, embeds_n=all_embeds_neg,\n att_mask_n=all_att_mask)\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,\n attention_mask_mm, labels=input_ids, is_paired=is_paired,\n only_alignment=False)\n return all_scores_query, txt_acc, alig_acc\n\n\ndef test(dataset, device, save_file_name, pretrained_model=None,\n random_patches=False):\n torch.cuda.empty_cache()\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle\n =False)\n if pretrained_model != None:\n evaluator = FashionbertEvaluator.from_pretrained(pretrained_model,\n return_dict=True)\n else:\n evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased',\n return_dict=True)\n evaluator.to(device)\n evaluator.eval()\n query_dict_im2txt = {}\n query_dict_txt2im = {}\n running_acc_alignment_im2txt = 0.0\n running_acc_pred_im2txt = 0.0\n running_acc_alignment_txt2im = 0.0\n running_acc_pred_txt2im = 0.0\n with torch.no_grad():\n for i, (patches, neg_patches, input_ids, attention_mask,\n neg_input_ids, neg_attention_mask, img_name) in enumerate(tqdm(\n dataloader)):\n is_paired = 1.0\n im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(\n patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches)\n running_acc_pred_im2txt += im2txt_pred_acc\n running_acc_alignment_im2txt += im2txt_alig_acc\n query_dict_im2txt[img_name[0]] = im2txt_query_scores\n txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(\n patches, neg_patches, input_ids, 
is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches)\n running_acc_pred_txt2im += txt2im_pred_acc\n running_acc_alignment_txt2im += txt2im_alig_acc\n query_dict_txt2im[img_name[0]] = txt2im_query_scores\n im2txt_test_set_accuracy_pred = running_acc_pred_im2txt / len(dataloader)\n im2txt_test_set_accuracy_alig = running_acc_alignment_im2txt / len(\n dataloader)\n txt2im_test_set_accuracy_pred = running_acc_pred_txt2im / len(dataloader)\n txt2im_test_set_accuracy_alig = running_acc_alignment_txt2im / len(\n dataloader)\n print()\n results = ''\n log1 = '---- IMAGE 2 TEXT EVALUATIONS ---------------------\\n'\n log2 = evaluator.rank_at_K(query_dict_im2txt, True)\n log3 = '---- Accuracy in token predictions: {} -----\\n'.format(\n im2txt_test_set_accuracy_pred)\n log4 = '---- Accuracy in text-image alignment: {} -----\\n'.format(\n im2txt_test_set_accuracy_alig)\n print(log1)\n print(log2)\n print(log3)\n print(log4)\n print()\n log5 = '---- TEXT 2 IMAGE EVALUATIONS ---------------------\\n'\n log6 = evaluator.rank_at_K(query_dict_txt2im, False)\n log7 = '---- Accuracy in token predictions: {} -----\\n'.format(\n txt2im_test_set_accuracy_pred)\n log8 = '---- Accuracy in text-image alignment: {} -----\\n'.format(\n txt2im_test_set_accuracy_alig)\n print(log5)\n print(log6)\n print(log7)\n print(log8)\n results += log1\n results += log2\n results += log3\n results += log4\n results += log5\n results += log6\n results += log7\n results += log8\n save_json(save_file_name, results)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Evaluate FashionBert')\n parser.add_argument('--path_to_train_dataset', help=\n 'Absolute path to .pkl file used for training')\n parser.add_argument('--path_to_pretrained_model', help=\n 'Path to pretrained model', default=None)\n parser.add_argument('--save_test_set', help=\n 'Name to save test set .pkl', default='test_set.pkl')\n parser.add_argument('--save_results_name', 
help=\n 'Name to save file with results', default='results.json')\n parser.add_argument('--random_patches', help=\n 'using random_patches True or False', default=False)\n args = parser.parse_args()\n print('Processing the dataset...')\n dataset = EvaluationDataset(args.path_to_train_dataset)\n print('Done!')\n print('\\nGetting aligned pairs...')\n get_all_paired_test_set(dataset, args.save_test_set, num_samples=1000)\n print('Loading dataset...')\n dataset = Evaluation_negpairs(args.save_test_set)\n print('Starting evaluation...')\n test(dataset, device, args.save_results_name, pretrained_model=args.\n path_to_pretrained_model, random_patches=args.random_patches)\n print('Done!!!')\n",
"step-4": "import torch, torchvision\nimport torch.nn.functional as F\nimport transformers\nfrom transformers import BertTokenizer, BertModel\nfrom transformers.models.bert.modeling_bert import BertPreTrainingHeads\nfrom utils import construct_bert_input, EvaluationDataset, save_json\nfrom fashionbert_evaluator_parser import Evaluation_negpairs, get_all_paired_test_set\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\nclass FashionbertEvaluator(transformers.BertPreTrainedModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.bert = BertModel(config)\n self.im_to_embedding = torch.nn.Linear(2048, 768)\n self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size,\n eps=config.layer_norm_eps)\n self.cls = BertPreTrainingHeads(config)\n self.init_weights()\n\n def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n\n ):\n \"\"\"\n INPUTS:\n input_ids [1, 448]\n embeds: [1, 512, 768]\n att_mask: [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds.to(device),\n attention_mask=att_mask.to(device), labels=input_ids.to(device),\n is_paired=torch.tensor(True).to(device), only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids.to(device), is_paired=torch.tensor(False).to(\n device), only_alignment=True)\n score_n = 
score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids, 'score': score_n, 'label':\n False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,\n input_ids_n, embeds_n, att_mask_n):\n \"\"\"\n INPUTS:\n input_ids_p : [1, 448]\n embeds_p: [1, 512, 768]\n att_mask_p: [1, 448]\n input_ids_n: list with 100 of [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n query_dict_scores = []\n query_scores = []\n query_labels = []\n score_pos = self.get_scores_and_metrics(embeds=embeds_p.to(device),\n attention_mask=att_mask_p.to(device), labels=input_ids_p.to(\n device), is_paired=torch.tensor(True).to(device),\n only_alignment=True)\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item()\n score_pos_dict = {'text': input_ids_p, 'score': score_p, 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(\n device), attention_mask=att_mask_n[n].to(device), labels=\n input_ids_n[n].to(device), is_paired=torch.tensor(False).to\n (device), only_alignment=True)\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item()\n score_neg_dict = {'text': input_ids_n[n], 'score': score_n,\n 'label': False}\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key\n =lambda x: x[0], reverse=True)]\n return S\n\n def rank_at_K(self, dict_scores, img2text=True):\n logs = ''\n if img2text:\n l1 = '------ Image 2 Text ------\\n'\n logs += l1\n print(l1)\n else:\n l2 = '------ 
Text 2 Image ------\\n'\n print(l2)\n Ks = [1, 5, 10]\n for K in Ks:\n found = 0\n for key, val in dict_scores.items():\n tmp_range = K if K < len(val) else len(val)\n for i in range(tmp_range):\n score, label = val[i]\n if label:\n found += 1\n break\n l3 = '------ Rank @ {} = {} ------\\n'.format(K, found / len(\n dict_scores.keys()))\n logs += l3\n print(l3)\n return logs\n\n def get_scores_and_metrics(self, embeds, attention_mask, labels=None,\n is_paired=None, only_alignment=False):\n batch_size = embeds.shape[0]\n seq_length = embeds.shape[1]\n hidden_dim = embeds.shape[2]\n embeds = embeds.to(device)\n attention_mask = attention_mask.to(device)\n outputs = self.bert(inputs_embeds=embeds, attention_mask=\n attention_mask, return_dict=True)\n sequence_output = outputs.last_hidden_state\n pooler_output = outputs.pooler_output\n text_output = sequence_output[:, :labels.shape[1], :]\n image_output = sequence_output[:, labels.shape[1]:, :]\n prediction_scores, alignment_scores = self.cls(text_output,\n pooler_output)\n if only_alignment:\n return alignment_scores, is_paired\n text_evaluator = {'text_pred_logits': prediction_scores,\n 'text_labels': labels}\n alignment_evaluator = {'alignment_logits': alignment_scores,\n 'alignment_labels': is_paired}\n text_acc, alig_acc = self.accuracy_scores(text_evaluator,\n alignment_evaluator)\n return text_acc, alig_acc\n\n def accuracy_scores(self, text_evaluator, alignment_evaluator):\n \"\"\"\n Text evaluator: dictionary with preds and labels (aligned)\n Image evaluator: dictionary with image output and image patches (aligned)\n \"\"\"\n text_pred_logits = text_evaluator['text_pred_logits']\n text_labels = text_evaluator['text_labels']\n text_preds_logits = text_pred_logits.detach().cpu().numpy()\n text_labels = text_labels.cpu().numpy().flatten()\n text_preds = np.argmax(text_preds_logits, axis=2).flatten()\n alig_pred_logits = alignment_evaluator['alignment_logits']\n alig_labels = 
alignment_evaluator['alignment_labels']\n alig_pred_logits = alig_pred_logits.detach().cpu().numpy()\n alig_labels = np.asarray([alig_labels])\n alig_preds = np.argmax(alig_pred_logits, axis=1).flatten()\n text_acc = accuracy_score(text_labels, text_preds)\n alig_acc = accuracy_score(alig_labels, alig_preds)\n return text_acc, alig_acc\n\n\ndef image2text(patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches):\n \"\"\"\n image2text retrieval:\n Query = Image\n Paired with: 1 positive text, 100 negative texts\n \"\"\"\n im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n embeds = construct_bert_input(patches, input_ids, evaluator, device=\n device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg = []\n all_att_mask = []\n all_neg_inputs = []\n for j in range(len_neg_inputs):\n neg_input_id_sample = neg_input_ids[:, j, :]\n neg_attention_mask_sample = neg_attention_mask[:, j, :]\n embeds_neg = construct_bert_input(patches, neg_input_id_sample,\n evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(neg_attention_mask_sample, (0, \n embeds_neg.shape[1] - neg_input_id_sample.shape[1]), value=1)\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_neg_inputs.append(neg_input_id_sample.detach())\n all_scores_query = evaluator.img2text_scores(input_ids_p=input_ids,\n embeds_p=embeds, att_mask_p=attention_mask_mm, input_ids_n=\n all_neg_inputs, embeds_n=all_embeds_neg, att_mask_n=all_att_mask)\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,\n attention_mask_mm, labels=input_ids, is_paired=is_paired,\n only_alignment=False)\n return all_scores_query, txt_acc, alig_acc\n\n\ndef text2image(patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, 
evaluator, random_patches):\n \"\"\"\n text2image retrieval:\n Query = Text\n Paired with: 1 positive image, 100 negative images\n \"\"\"\n im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n embeds = construct_bert_input(patches, input_ids, evaluator, device=\n device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg = []\n all_att_mask = []\n for p in range(len_neg_inputs):\n neg_patches_sample = neg_patches[:, p, :, :]\n embeds_neg = construct_bert_input(neg_patches_sample, input_ids,\n evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(attention_mask, (0, embeds_neg.shape[1] -\n input_ids.shape[1]), value=1)\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_scores_query = evaluator.text2img_scores(input_ids=input_ids,\n embeds=embeds, att_mask=attention_mask_mm, embeds_n=all_embeds_neg,\n att_mask_n=all_att_mask)\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,\n attention_mask_mm, labels=input_ids, is_paired=is_paired,\n only_alignment=False)\n return all_scores_query, txt_acc, alig_acc\n\n\ndef test(dataset, device, save_file_name, pretrained_model=None,\n random_patches=False):\n torch.cuda.empty_cache()\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle\n =False)\n if pretrained_model != None:\n evaluator = FashionbertEvaluator.from_pretrained(pretrained_model,\n return_dict=True)\n else:\n evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased',\n return_dict=True)\n evaluator.to(device)\n evaluator.eval()\n query_dict_im2txt = {}\n query_dict_txt2im = {}\n running_acc_alignment_im2txt = 0.0\n running_acc_pred_im2txt = 0.0\n running_acc_alignment_txt2im = 0.0\n running_acc_pred_txt2im = 0.0\n with torch.no_grad():\n for i, (patches, neg_patches, input_ids, attention_mask,\n neg_input_ids, 
neg_attention_mask, img_name) in enumerate(tqdm(\n dataloader)):\n is_paired = 1.0\n im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(\n patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches)\n running_acc_pred_im2txt += im2txt_pred_acc\n running_acc_alignment_im2txt += im2txt_alig_acc\n query_dict_im2txt[img_name[0]] = im2txt_query_scores\n txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(\n patches, neg_patches, input_ids, is_paired, attention_mask,\n neg_input_ids, neg_attention_mask, evaluator, random_patches)\n running_acc_pred_txt2im += txt2im_pred_acc\n running_acc_alignment_txt2im += txt2im_alig_acc\n query_dict_txt2im[img_name[0]] = txt2im_query_scores\n im2txt_test_set_accuracy_pred = running_acc_pred_im2txt / len(dataloader)\n im2txt_test_set_accuracy_alig = running_acc_alignment_im2txt / len(\n dataloader)\n txt2im_test_set_accuracy_pred = running_acc_pred_txt2im / len(dataloader)\n txt2im_test_set_accuracy_alig = running_acc_alignment_txt2im / len(\n dataloader)\n print()\n results = ''\n log1 = '---- IMAGE 2 TEXT EVALUATIONS ---------------------\\n'\n log2 = evaluator.rank_at_K(query_dict_im2txt, True)\n log3 = '---- Accuracy in token predictions: {} -----\\n'.format(\n im2txt_test_set_accuracy_pred)\n log4 = '---- Accuracy in text-image alignment: {} -----\\n'.format(\n im2txt_test_set_accuracy_alig)\n print(log1)\n print(log2)\n print(log3)\n print(log4)\n print()\n log5 = '---- TEXT 2 IMAGE EVALUATIONS ---------------------\\n'\n log6 = evaluator.rank_at_K(query_dict_txt2im, False)\n log7 = '---- Accuracy in token predictions: {} -----\\n'.format(\n txt2im_test_set_accuracy_pred)\n log8 = '---- Accuracy in text-image alignment: {} -----\\n'.format(\n txt2im_test_set_accuracy_alig)\n print(log5)\n print(log6)\n print(log7)\n print(log8)\n results += log1\n results += log2\n results += log3\n results += log4\n results += log5\n results += log6\n 
results += log7\n results += log8\n save_json(save_file_name, results)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Evaluate FashionBert')\n parser.add_argument('--path_to_train_dataset', help=\n 'Absolute path to .pkl file used for training')\n parser.add_argument('--path_to_pretrained_model', help=\n 'Path to pretrained model', default=None)\n parser.add_argument('--save_test_set', help=\n 'Name to save test set .pkl', default='test_set.pkl')\n parser.add_argument('--save_results_name', help=\n 'Name to save file with results', default='results.json')\n parser.add_argument('--random_patches', help=\n 'using random_patches True or False', default=False)\n args = parser.parse_args()\n print('Processing the dataset...')\n dataset = EvaluationDataset(args.path_to_train_dataset)\n print('Done!')\n print('\\nGetting aligned pairs...')\n get_all_paired_test_set(dataset, args.save_test_set, num_samples=1000)\n print('Loading dataset...')\n dataset = Evaluation_negpairs(args.save_test_set)\n print('Starting evaluation...')\n test(dataset, device, args.save_results_name, pretrained_model=args.\n path_to_pretrained_model, random_patches=args.random_patches)\n print('Done!!!')\n",
"step-5": "import torch, torchvision\nimport torch.nn.functional as F\nimport transformers\nfrom transformers import BertTokenizer, BertModel\nfrom transformers.models.bert.modeling_bert import BertPreTrainingHeads\nfrom utils import construct_bert_input, EvaluationDataset, save_json\nfrom fashionbert_evaluator_parser import Evaluation_negpairs, get_all_paired_test_set\n\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass FashionbertEvaluator(transformers.BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.bert = BertModel(config)\n\n self.im_to_embedding = torch.nn.Linear(2048, 768)\n self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n self.cls = BertPreTrainingHeads(config)\n\n self.init_weights()\n\n def text2img_scores(self,\n input_ids,\n embeds,\n att_mask,\n embeds_n, # list\n att_mask_n, # list\n ):\n \"\"\"\n INPUTS:\n input_ids [1, 448]\n embeds: [1, 512, 768]\n att_mask: [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n # Score for positive\n query_dict_scores = []\n query_scores = []\n query_labels = []\n\n score_pos = self.get_scores_and_metrics(\n embeds=embeds.to(device),\n attention_mask=att_mask.to(device),\n labels=input_ids.to(device),\n is_paired=torch.tensor(True).to(device),\n only_alignment=True,\n )\n\n # label = score_pos[1]\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item() # confidence that is actually positive\n score_pos_dict = {'text': input_ids,\n 'score': score_p,\n 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n\n # Scores for negative\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(\n 
embeds=embeds_n[n].to(device),\n attention_mask=att_mask_n[n].to(device),\n labels=input_ids.to(device),\n is_paired=torch.tensor(False).to(device),\n only_alignment=True,\n )\n\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item() # confidence that is actually positive\n score_neg_dict = {'text': input_ids,\n 'score': score_n,\n 'label': False}\n\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key=lambda x: x[0], reverse=True)]\n return S\n\n def img2text_scores(self, input_ids_p, embeds_p, att_mask_p, input_ids_n, embeds_n, att_mask_n):\n \"\"\"\n INPUTS:\n input_ids_p : [1, 448]\n embeds_p: [1, 512, 768]\n att_mask_p: [1, 448]\n input_ids_n: list with 100 of [1, 448]\n embeds_n: list with 100 of [1, 512, 768]\n att_mask_n: list with 100 of [1, 448]\n \"\"\"\n # Score for positive\n query_dict_scores = []\n query_scores = []\n query_labels = []\n\n score_pos = self.get_scores_and_metrics(\n embeds=embeds_p.to(device),\n attention_mask=att_mask_p.to(device),\n labels=input_ids_p.to(device),\n is_paired=torch.tensor(True).to(device),\n only_alignment=True,\n )\n\n # label = score_pos[1]\n score_p = score_pos[0].squeeze()\n score_p = score_p[1].detach().item() # confidence that is actually positive\n score_pos_dict = {'text': input_ids_p,\n 'score': score_p,\n 'label': True}\n query_dict_scores.append(score_pos_dict)\n query_scores.append(score_p)\n query_labels.append(True)\n\n # Scores for negative\n for n in range(len(embeds_n)):\n score_neg = self.get_scores_and_metrics(\n embeds=embeds_n[n].to(device),\n attention_mask=att_mask_n[n].to(device),\n labels=input_ids_n[n].to(device),\n is_paired=torch.tensor(False).to(device),\n only_alignment=True,\n )\n\n score_n = score_neg[0].squeeze()\n score_n = score_n[1].detach().item() # confidence that is actually positive\n score_neg_dict = {'text': input_ids_n[n],\n 'score': 
score_n,\n 'label': False}\n\n query_dict_scores.append(score_neg_dict)\n query_scores.append(score_n)\n query_labels.append(False)\n\n # print(evaluator.tokenizer.convert_ids_to_tokens(ids))\n S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key=lambda x: x[0], reverse=True)]\n\n return S\n\n def rank_at_K(self, dict_scores, img2text=True):\n logs = ''\n\n if img2text:\n l1 = '------ Image 2 Text ------\\n'\n logs += l1\n print(l1)\n else:\n l2 = '------ Text 2 Image ------\\n'\n print(l2)\n\n Ks = [1, 5, 10]\n for K in Ks:\n found = 0\n for key, val in dict_scores.items():\n tmp_range = K if K < len(val) else len(val)\n for i in range(tmp_range):\n score, label = val[i]\n if label:\n found += 1\n break\n l3 = '------ Rank @ {} = {} ------\\n'.format(K, (found / len(dict_scores.keys())))\n logs += l3\n print(l3)\n\n return logs\n\n def get_scores_and_metrics(\n self,\n embeds, # text + image embedded\n attention_mask, # text + image attention mask\n labels=None, # [batch, 448]\n is_paired=None, # [batch]\n only_alignment=False,\n ):\n\n batch_size = embeds.shape[0]\n seq_length = embeds.shape[1]\n hidden_dim = embeds.shape[2]\n\n embeds = embeds.to(device)\n attention_mask = attention_mask.to(device)\n\n outputs = self.bert(inputs_embeds=embeds,\n attention_mask=attention_mask,\n return_dict=True)\n\n sequence_output = outputs.last_hidden_state # [batch, seq_length, hidden_size]\n pooler_output = outputs.pooler_output # [batch_size, hidden_size] last layer of hidden-state of first token (CLS) + linear layer + tanh\n\n # hidden states corresponding to the text part\n text_output = sequence_output[:, :labels.shape[1], :] # [batch, 448, 768]\n # hidden states corresponding to the image part\n image_output = sequence_output[:, labels.shape[1]:, :] # [batch, 64, 768]\n\n ### FOR TEXT\n # Predict the masked text tokens and alignment scores (whether image and text match)\n prediction_scores, alignment_scores = self.cls(text_output, pooler_output)\n # 
prediction score is [batch, 448, vocab_size = 30522]\n # aligment score is [batch, 2] 2 with logits corresponding to 1 and 0\n\n if only_alignment:\n return alignment_scores, is_paired\n\n text_evaluator = {'text_pred_logits': prediction_scores,\n 'text_labels': labels}\n\n alignment_evaluator = {'alignment_logits': alignment_scores,\n 'alignment_labels': is_paired}\n\n text_acc, alig_acc = self.accuracy_scores(text_evaluator, alignment_evaluator)\n return text_acc, alig_acc\n\n def accuracy_scores(self, text_evaluator, alignment_evaluator):\n \"\"\"\n Text evaluator: dictionary with preds and labels (aligned)\n Image evaluator: dictionary with image output and image patches (aligned)\n \"\"\"\n # Text\n text_pred_logits = text_evaluator['text_pred_logits'] # [num_aligned, 448, vocab_size]\n text_labels = text_evaluator['text_labels'] # [num_aligned, 448]\n\n text_preds_logits = text_pred_logits.detach().cpu().numpy()\n text_labels = text_labels.cpu().numpy().flatten()\n text_preds = np.argmax(text_preds_logits, axis=2).flatten() # [num_algined, 448]\n\n # Alignment\n alig_pred_logits = alignment_evaluator['alignment_logits'] # [1, 2]\n alig_labels = alignment_evaluator['alignment_labels'] # [2]\n\n alig_pred_logits = alig_pred_logits.detach().cpu().numpy()\n alig_labels = np.asarray([alig_labels])\n # alig_labels = alig_labels.double().cpu().numpy().flatten()\n alig_preds = np.argmax(alig_pred_logits, axis=1).flatten() # [1, 2]\n\n text_acc = accuracy_score(text_labels, text_preds)\n alig_acc = accuracy_score(alig_labels, alig_preds)\n\n return text_acc, alig_acc\n\n\ndef image2text(patches, neg_patches, input_ids, is_paired, attention_mask, neg_input_ids, neg_attention_mask,\n evaluator, random_patches):\n \"\"\"\n image2text retrieval:\n Query = Image\n Paired with: 1 positive text, 100 negative texts\n \"\"\"\n im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n\n embeds = construct_bert_input(patches, input_ids, 
evaluator, device=device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] - input_ids.shape[1]), value=1)\n\n # NEGATIVE SAMPLE # [batch, 100, 448]\n all_embeds_neg = []\n all_att_mask = []\n all_neg_inputs = []\n\n for j in range(len_neg_inputs):\n neg_input_id_sample = neg_input_ids[:, j, :] # [1, 448]\n neg_attention_mask_sample = neg_attention_mask[:, j, :]\n\n embeds_neg = construct_bert_input(patches, neg_input_id_sample, evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(neg_attention_mask_sample, (0, embeds_neg.shape[1] - neg_input_id_sample.shape[1]),\n value=1)\n\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n all_neg_inputs.append(neg_input_id_sample.detach())\n\n # Now I have all joint embeddings for 1 positive sample and 100 neg samples\n all_scores_query = evaluator.img2text_scores(\n input_ids_p=input_ids,\n embeds_p=embeds,\n att_mask_p=attention_mask_mm,\n input_ids_n=all_neg_inputs,\n embeds_n=all_embeds_neg,\n att_mask_n=all_att_mask)\n\n # Accuracy: only in positive example\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(\n embeds, # text + image embedded\n attention_mask_mm,\n labels=input_ids, # [batch, 448]\n is_paired=is_paired, # [batch]\n only_alignment=False,\n )\n\n return all_scores_query, txt_acc, alig_acc\n\n\ndef text2image(patches, neg_patches, input_ids, is_paired, attention_mask, neg_input_ids, neg_attention_mask,\n evaluator, random_patches):\n \"\"\"\n text2image retrieval:\n Query = Text\n Paired with: 1 positive image, 100 negative images\n \"\"\"\n im_seq_len = patches.shape[1]\n bs = input_ids.shape[0]\n len_neg_inputs = neg_input_ids.shape[1]\n\n # before constructing bert, att mask is 448 long\n # POSITIVE IMAGE\n embeds = construct_bert_input(patches, input_ids, evaluator, device=device, random_patches=random_patches)\n attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] - input_ids.shape[1]), 
value=1) # [1, 512]\n\n # NEGATIVE SAMPLES\n all_embeds_neg = []\n all_att_mask = []\n\n for p in range(len_neg_inputs):\n neg_patches_sample = neg_patches[:, p, :, :]\n embeds_neg = construct_bert_input(neg_patches_sample, input_ids, evaluator, device=device, random_patches=random_patches)\n attention_mask_neg = F.pad(attention_mask, (0, embeds_neg.shape[1] - input_ids.shape[1]), value=1)\n\n all_embeds_neg.append(embeds_neg)\n all_att_mask.append(attention_mask_neg)\n\n # Now I have all joint embeddings for 1 positive sample and 100 neg samples\n all_scores_query = evaluator.text2img_scores(\n input_ids=input_ids,\n embeds=embeds,\n att_mask=attention_mask_mm,\n embeds_n=all_embeds_neg, # list\n att_mask_n=all_att_mask) # list\n\n # Accuracy: only in positive example\n txt_acc, alig_acc = evaluator.get_scores_and_metrics(\n embeds, # text + image embedded\n attention_mask_mm, # [batch,\n labels=input_ids, # [batch, 448]\n is_paired=is_paired, # [batch]\n only_alignment=False,\n )\n\n return all_scores_query, txt_acc, alig_acc\n\n\ndef test(dataset, device, save_file_name, pretrained_model=None, random_patches=False):\n torch.cuda.empty_cache()\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=1,\n shuffle=False,\n )\n\n if pretrained_model != None:\n evaluator = FashionbertEvaluator.from_pretrained(pretrained_model, return_dict=True)\n else:\n evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased', return_dict=True)\n\n evaluator.to(device)\n evaluator.eval()\n\n query_dict_im2txt = {}\n query_dict_txt2im = {}\n running_acc_alignment_im2txt = 0.0\n running_acc_pred_im2txt = 0.0\n running_acc_alignment_txt2im = 0.0\n running_acc_pred_txt2im = 0.0\n\n with torch.no_grad():\n for i, (\n patches, neg_patches, input_ids, attention_mask, neg_input_ids, neg_attention_mask, img_name) in enumerate(\n tqdm(dataloader)):\n # ****** Shapes ********\n # input_ids shape: [1, 448]\n # neg_input_ids shape: [1, NUM_SAMPLES=100, 448]\n # 
neg_patches: [1, NUM_SAMPLES=100, 64, 2048]\n\n # IMAGE 2 TEXT\n\n is_paired = 1.\n # print('im2text..')\n im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(patches, neg_patches, input_ids,\n is_paired, attention_mask,\n neg_input_ids, neg_attention_mask,\n evaluator, random_patches)\n\n # print('done')\n\n # Accuracies\n running_acc_pred_im2txt += im2txt_pred_acc\n running_acc_alignment_im2txt += im2txt_alig_acc\n\n # For Rank @ K\n query_dict_im2txt[img_name[0]] = im2txt_query_scores\n\n # TEXT 2 IMAGE\n # print('txt2img..')\n txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(patches, neg_patches, input_ids,\n is_paired, attention_mask,\n neg_input_ids, neg_attention_mask,\n evaluator, random_patches)\n\n # print('done')\n\n # Accuracies\n running_acc_pred_txt2im += txt2im_pred_acc\n running_acc_alignment_txt2im += txt2im_alig_acc\n\n # For Rank @ K\n query_dict_txt2im[img_name[0]] = txt2im_query_scores\n\n im2txt_test_set_accuracy_pred = (running_acc_pred_im2txt / len(dataloader))\n im2txt_test_set_accuracy_alig = (running_acc_alignment_im2txt / len(dataloader))\n txt2im_test_set_accuracy_pred = (running_acc_pred_txt2im / len(dataloader))\n txt2im_test_set_accuracy_alig = (running_acc_alignment_txt2im / len(dataloader))\n\n print()\n results = ''\n log1 = '---- IMAGE 2 TEXT EVALUATIONS ---------------------\\n'\n log2 = evaluator.rank_at_K(query_dict_im2txt, True)\n log3 = '---- Accuracy in token predictions: {} -----\\n'.format(im2txt_test_set_accuracy_pred)\n log4 = '---- Accuracy in text-image alignment: {} -----\\n'.format(im2txt_test_set_accuracy_alig)\n print(log1)\n print(log2)\n print(log3)\n print(log4)\n print()\n log5 = '---- TEXT 2 IMAGE EVALUATIONS ---------------------\\n'\n log6 = evaluator.rank_at_K(query_dict_txt2im, False)\n log7 = '---- Accuracy in token predictions: {} -----\\n'.format(txt2im_test_set_accuracy_pred)\n log8 = '---- Accuracy in text-image alignment: {} 
-----\\n'.format(txt2im_test_set_accuracy_alig)\n print(log5)\n print(log6)\n print(log7)\n print(log8)\n\n results += log1\n results += log2\n results += log3\n results += log4\n results += log5\n results += log6\n results += log7\n results += log8\n\n save_json(save_file_name, results)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Evaluate FashionBert')\n parser.add_argument('--path_to_train_dataset', help='Absolute path to .pkl file used for training')\n parser.add_argument('--path_to_pretrained_model', help='Path to pretrained model', default=None)\n parser.add_argument('--save_test_set', help='Name to save test set .pkl', default='test_set.pkl')\n parser.add_argument('--save_results_name', help='Name to save file with results', default='results.json')\n parser.add_argument('--random_patches', help='using random_patches True or False', default=False)\n args = parser.parse_args()\n\n # 1) Builds the 1000 sample dataset. This corresponds to the fashionibert_evaluator_parser file\n print('Processing the dataset...')\n dataset = EvaluationDataset(args.path_to_train_dataset)\n # savefile_path = '../../../__fashionbert_trained/fashionbert_vanilla_adaptive/evaluation_set_fashionbert_vanilla.pkl'\n print('Done!')\n print('\\nGetting aligned pairs...')\n get_all_paired_test_set(dataset, args.save_test_set, num_samples=1000)\n # print('Done!')\n\n # 2) Evaluate-\n\n # eval_set_path = '../../../__fashionbert_trained/fashionbert_vanilla_adaptive/evaluation_set_fashionbert_vanilla.pkl'\n # path_to_trained_model = '../../../__fashionbert_trained/fashionbert_vanilla_adaptive/'\n # path_to_save_json = '../../../__fashionbert_trained/fashionbert_vanilla_adaptive/results.json'\n print('Loading dataset...')\n dataset = Evaluation_negpairs(args.save_test_set)\n print('Starting evaluation...')\n # test(dataset, device, args.num_subsamples, args.save_file_name, args.path_to_pretrained_model)\n test(dataset, device, args.save_results_name, 
pretrained_model=args.path_to_pretrained_model, random_patches=args.random_patches)\n print('Done!!!')\n\n",
"step-ids": [
8,
10,
12,
13,
14
]
}
|
[
8,
10,
12,
13,
14
] |
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Mon Jul 19 16:02:11 2010
# by: The Resource Compiler for PyQt (Qt v4.6.3)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
# Raw resource payload emitted by the PyQt resource compiler (see the
# generator banner above). The bytes begin with a 4-byte length prefix
# (\x00\x00\x07\xb6) followed by the PNG file signature (\x89PNG...),
# i.e. this blob embeds a single PNG image. Machine-generated — do not
# hand-edit; regenerate from the .qrc source instead.
qt_resource_data = b"\
\x00\x00\x07\xb6\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xaf\xc8\x37\x05\x8a\xe9\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x07\x48\x49\x44\x41\x54\x78\xda\x62\
\xfc\xff\xff\x3f\xc3\x40\x02\x80\x00\x62\x62\x18\x60\x00\x10\x40\
\x03\xee\x00\x80\x00\x1a\x70\x07\x00\x04\xd0\x80\x3b\x00\x20\x80\
\x58\x40\x84\xb1\x71\x2a\xc3\x8f\x1f\x3f\x99\x7e\xfe\xfc\xce\xfd\
\xff\xff\xbf\xbf\x4a\x4a\xaa\x86\x52\x52\x02\x59\x3c\x3c\x8c\x66\
\x7f\xfe\xfc\x61\x7e\xf2\xe4\xed\x8d\xd3\xa7\xf7\x2d\xfb\xf6\xed\
\xdd\x76\x25\x25\xc7\x9f\x0c\x0c\xff\xa0\xda\x19\x21\xbe\x60\x82\
\xf8\x83\x91\x91\x11\x8e\x99\x98\x18\x19\x40\xe9\x1b\xc4\x86\xc9\
\x81\xc4\x98\x98\x98\x19\x7f\xff\xfe\xf4\xe5\xcc\x99\xa5\xe0\xd4\
\x0f\x10\x40\x60\x07\xfc\xfb\xf7\xdf\x4e\x45\x45\xa6\x41\x50\x90\
\x97\x07\x98\x2b\xfe\x72\x70\x70\x6a\x70\x71\x31\x0a\xb0\xb1\x31\
\x81\x2d\xe1\xe5\xe5\x55\x94\x92\x8a\x75\xff\xf7\xef\xef\x65\x06\
\x86\xff\x3f\x41\x86\xb0\xb3\xb3\x83\x03\x10\x64\xfe\xef\xdf\xbf\
\x41\x06\x83\x2d\x01\x7a\x00\xcc\x06\x3b\xe0\xdf\x7f\x86\xff\x70\
\x87\x81\x4c\x02\x3a\x80\x99\x99\xf1\xc5\x8b\x97\xaf\x1e\x3c\x50\
\xa9\x7f\xf3\xe6\xce\x19\x80\x00\x02\x3b\x40\x4d\x4d\x76\x62\x6f\
\x6f\x9a\x81\xa0\x20\x1f\xd8\xd5\x9f\x3f\x7f\x67\xf8\xf8\xf1\x2b\
\xc3\xb7\x6f\xbf\xc1\x7c\x16\x16\x66\x06\x56\x56\x56\xa0\x47\x99\
\xf4\x41\x86\xb1\xb1\x73\x30\x5c\xbf\x7c\x9c\xe1\xf1\x83\x3b\x0c\
\x6a\xda\x46\x0c\xc2\x22\x62\x40\x75\x7f\x80\x1e\xf9\xc7\xc0\xcc\
\xcc\xcc\xf0\xf7\x2f\x13\x03\x07\x07\x1f\x03\x3b\x3b\x50\x2d\x0b\
\x23\x03\x33\x28\x10\x18\x99\x18\xbe\x7c\xfd\xc1\xf0\xfd\x27\x2b\
\xd0\xfc\xbf\x0c\xf7\xef\x5f\x66\x02\x3a\x20\x09\x20\x80\xc0\x0e\
\x10\x15\xe5\x96\x65\x61\xf9\xc7\xf0\xe5\xcb\x67\xb0\xeb\x3f\x7d\
\xfa\xca\xf0\xf2\xe5\x7b\x86\x0f\x1f\xbe\x83\x83\x8f\x99\x99\x05\
\x8c\x41\x72\x5c\x9c\x5c\x0c\x77\x6f\x9f\x60\x68\x59\x75\x9c\x41\
\xc9\x3a\x80\xe1\x45\xd3\x74\x86\x25\xfd\xb9\x0c\x4b\x96\xaf\x66\
\xf8\xf8\xe1\x03\x43\x45\x45\x25\xd8\xe7\x97\xaf\x5e\x64\x10\x91\
\x92\x65\x10\x92\x94\x61\x78\xf1\x8d\x91\xe1\xf9\xd7\xff\x0c\xaf\
\xdf\xfd\x64\xe0\x7a\x78\x83\x41\x54\x50\x11\xe8\x40\x0e\x05\xa0\
\xd1\x0a\x00\x01\x04\x8e\xbc\x5f\xbf\x7e\xfd\xfd\xfd\xfb\x2f\x3c\
\xbe\x50\xe3\x0c\x82\x41\x7c\x66\x66\x26\x70\xb4\xbf\x7c\x76\x93\
\xe1\xfe\x27\x1e\x86\xdd\x8f\xa5\x18\x18\x39\x44\x19\x04\xf8\x78\
\x18\x56\x2c\x5f\xc1\xb0\x60\xc1\x22\x86\xef\xdf\x7f\x30\x28\xab\
\x28\x33\xd8\x58\x9b\x31\x3c\xff\xc6\xc4\x70\xe6\xfe\x67\x86\xcb\
\xf7\xde\x30\x7c\xff\xf2\x9b\x81\xf9\xe7\x37\x06\x0e\x60\xd4\xfd\
\xfe\xf5\x07\x18\x6d\x7f\x41\x96\xb1\x01\x04\x10\x0b\x2c\x31\x21\
\xd9\x8d\x15\x40\x1c\xc7\xc8\xf0\xf8\xf1\x0d\x06\x77\x5f\x6f\x06\
\x0e\xc1\x13\x0c\x07\x8f\x75\x31\x64\x97\x86\x30\xc8\x29\x6b\x31\
\x2c\x5d\xba\x14\x68\xf9\x77\x06\x0d\x0d\x75\x60\x82\xfe\x0d\x8c\
\x32\x76\x06\x0b\x25\x01\x86\x5f\x3f\x7e\x32\x5c\xb9\x72\x95\x41\
\x98\x4b\x8d\x81\x55\x90\x9f\xe1\x1d\x23\x3b\x30\x7a\x7f\xc2\x3c\
\xfb\x1f\x20\x80\x58\x88\xcd\x2e\x20\xdf\xbf\x7a\xf5\x88\x41\x4c\
\x8c\x9f\x41\x52\x52\x9e\x21\x39\x5e\x99\x21\x3b\x25\x92\x81\x85\
\x83\x07\x2c\x6f\x67\x67\x07\x57\xfb\xfb\x37\x24\x97\xf0\xf0\xf0\
\x32\xfc\x66\xe7\x62\x30\x30\x34\x66\xb8\x78\xf1\x1a\x83\xa4\x94\
\x38\x30\x3d\x81\x92\xe5\x0f\xb8\x87\x01\x02\x88\x05\xe1\xbb\xff\
\x78\x7c\xcf\x04\xf4\xd5\x0f\xa0\xaf\xfe\x30\xc8\xc9\x29\x83\x83\
\x99\x81\x81\x95\x81\x81\x9b\x93\x81\x0d\x18\x9c\x5f\xbe\x7c\x02\
\x3a\xee\x05\x58\x0d\x1f\x1f\x3f\x30\x4d\x49\x80\x13\xee\xb7\x6f\
\x3f\xc1\x39\x84\x99\x85\x1d\x68\xb9\x08\xc3\xa3\x47\x57\x18\x04\
\x04\x54\x19\x90\xab\x1f\x80\x00\x22\x2a\x04\x40\x89\xef\xc3\x87\
\x97\x0c\xb2\xb2\x52\xc0\x14\xfe\x1f\xec\x58\x90\xa3\x81\xd9\x92\
\xe1\xde\xbd\x07\x0c\x2f\x5e\xbc\x06\x0a\xb1\x03\x2d\x62\x03\x26\
\xde\x27\xc0\x68\x7a\xc2\xa0\xa2\xa2\xca\xc0\xc5\xc5\x0f\x4c\x5f\
\xa0\xf8\xfe\xc3\x20\x2c\x2c\x0e\x4e\xd8\x3f\x7e\x7c\x87\x46\x39\
\x24\x08\x00\x02\x88\x89\x50\x81\x08\x52\xf8\xf7\xef\x6f\xa0\x41\
\x5f\x19\xd8\xd8\xb8\xc0\x96\x42\xa2\x84\x99\xe1\xcd\x9b\x97\x0c\
\xaf\x5f\xbf\x63\xe0\xe1\x95\x64\x78\xfd\xe6\x23\xc3\xb9\x73\x67\
\x19\x38\x38\x85\x80\x39\x8e\x87\xe1\xc6\x8d\x6b\x0c\xc0\x82\x0d\
\x5a\x36\x00\xcb\x83\xff\x4c\x0c\xdc\xdc\xec\xc0\xd0\x7a\x0b\x0c\
\x1d\x84\xbf\x01\x02\x88\x09\x62\x09\xde\xe4\x07\xf4\xc1\x2f\x06\
\x4e\x4e\xa0\x0f\x99\x59\xc1\x86\xc1\x7c\xff\xfe\xfd\x7b\x06\x31\
\x71\x39\x86\x53\xa7\x8e\x30\x24\xa7\x84\x30\x14\x15\xa5\x02\xb3\
\x61\x16\xb0\xe0\xe2\x07\x46\x17\x17\xc3\xdb\xb7\xaf\x80\x96\x41\
\x3c\xf7\xf7\xef\x5f\xb0\x19\x3f\x7e\x7c\x00\x47\x29\x0c\x00\x04\
\x10\x11\x0e\x60\x00\x5b\x0a\x2a\xf9\x60\x6d\x07\x50\xd1\xfb\xf3\
\xe7\x0f\x70\xaa\x11\x13\x17\x65\x58\xb8\x60\x26\xc3\xe7\x4f\x9f\
\x40\x25\x2a\xc3\x89\xe3\x47\x18\xce\x9c\x39\xc6\x20\x2e\x21\x05\
\x2c\x70\x3e\xc2\x2d\x83\x98\xc1\x01\xe4\xff\x03\xab\x83\x15\xe3\
\x00\x01\xc4\x84\x5c\xa6\xe3\x03\x10\x4d\x08\x07\x81\x1c\xf1\x0f\
\xe8\xab\xff\xc0\x7a\x41\x50\x48\x18\x45\x2d\xbf\x00\x3f\xc3\x1f\
\x60\xb4\xfd\xfd\xfb\x0f\x29\x2a\x19\xc0\x25\xe5\xbf\x7f\xa8\xe6\
\x02\x04\x10\x52\x2e\xc0\x57\x06\x30\x83\xf3\x38\xc2\x31\xff\x80\
\xe9\x81\x13\x98\xe8\x58\x18\x5e\x3e\x7f\xca\x50\x5e\x51\xcd\xf0\
\x13\x98\xb8\x5e\x3c\x7f\xce\xe0\xe1\xed\xc3\x60\x6e\x6e\xc9\x70\
\xe3\xfa\x45\x06\x49\x09\x49\x68\x94\x41\xec\x00\x25\x40\x26\x26\
\x56\xb0\xe3\x61\x76\x02\x04\x10\x0b\xa1\x28\x00\x19\xc0\xc6\xc6\
\x06\xcc\x05\xff\x80\x3e\xfa\x05\x54\xcb\x0e\x97\x13\x03\xd6\x01\
\xf7\xee\xdf\x65\x10\x11\x91\x60\x98\x39\x7b\x3e\x38\x7b\xf2\xf0\
\x70\x33\xdc\xbd\x73\x1d\x58\xda\xfd\x64\x90\x90\x90\x02\xc7\x3d\
\xac\x3e\xf9\xf1\xe3\x17\x50\x5e\x84\x01\x58\xc3\xc2\xed\x04\x08\
\x20\x22\xb2\x21\x24\xb8\x39\x39\xf9\x19\xbe\x7e\xfd\xc4\xc0\xcf\
\x2f\x08\x4f\x54\xc2\x22\xa2\x40\xd7\xff\x67\x78\xf2\xe4\x31\xc3\
\xdb\x9f\x7f\x80\xc1\xfe\x87\xe1\xc5\xb3\x3f\xc0\xc4\xca\xc8\x60\
\x6a\x6a\x01\x2e\x0d\x7f\xff\xfe\x01\xcd\x49\xbf\x80\x69\xe2\x27\
\x30\x27\x08\x00\x13\xef\x57\x78\xb4\x03\x04\x10\x51\x51\x00\xb2\
\x8c\x8f\x4f\x88\xe1\xd9\xb3\x7b\xc0\x52\x50\x12\xe8\x20\x48\x28\
\x80\x7c\x22\x21\x21\x0d\x2c\x1d\x45\x81\x86\x7f\x01\xe7\x16\x16\
\x16\x56\xa0\x2f\xf9\xc1\xf1\xff\xfd\xfb\x4f\xb0\x3a\x36\x36\x56\
\x86\xdb\xb7\x6f\x03\x1d\x26\x08\xae\xd4\x90\x01\x40\x00\x11\x99\
\x08\xff\x83\x35\x72\x72\x8a\x82\xf3\x37\x28\x38\x61\x8e\xfe\x0b\
\xf4\x35\x0b\xb0\xa4\x13\x11\x11\x01\x3a\x4e\x0a\x48\x8b\x82\x83\
\x17\xe4\x68\x10\x60\x65\x65\x01\x46\xdf\x6b\x60\xf9\xff\x97\x41\
\x46\x46\x0d\xac\x1e\xb9\xee\x01\x08\x20\xa2\xb2\x21\x2c\x14\x84\
\x84\x44\x81\x05\x12\x2b\xc3\xe5\xcb\xe7\xc0\x7a\x40\x69\x03\x51\
\xd0\x20\xd4\x81\xf8\xa0\x82\x8a\x9d\x9d\x8d\xe1\xdd\xbb\xd7\x0c\
\x77\xee\x3c\x06\x56\x52\xe6\xf0\x34\x85\x0c\x00\x02\x88\x09\x5a\
\xaa\x31\x83\x2a\x1b\x6c\x55\x30\x72\x13\x0b\x54\xf8\x48\x4a\x2a\
\x00\x83\x96\x95\xe1\xc8\x91\xa3\x0c\xcf\x9f\x3f\x01\xfa\x90\x19\
\x5c\xd8\xc0\x13\x15\x30\x74\x40\x05\x0e\xa8\x14\xbc\x7a\xf5\x32\
\x30\xe8\x9f\x31\xe8\xe8\x58\x03\x8b\x65\x0e\x70\x6b\x09\x62\x16\
\x13\xb8\x69\x06\x52\x0f\x10\x40\xe0\x08\x79\xf2\xe4\xcd\xed\x87\
\x0f\xdf\x09\x2b\x2a\x4a\x01\xe3\x15\x54\x9b\x81\xe2\x17\x98\xcf\
\xff\x31\xc1\x2b\x23\x18\x06\xb5\x1b\x84\x85\xe5\x80\x29\xfa\x1b\
\xb0\x04\xbc\x07\xac\x6a\xef\x01\x43\x86\x17\xe8\x30\x71\xb0\x8f\
\x3f\x00\x1b\x25\xcf\x9e\x01\xeb\xff\xef\xff\x80\xe9\x46\x8a\x41\
\x5b\x5b\x05\xec\xf8\x5f\xbf\x80\x25\xc6\x3f\x90\x67\x58\x80\x0d\
\x9e\xcf\xa0\x68\x01\xd5\x68\xff\x01\x02\x88\x11\x14\x24\x52\x52\
\x7a\x76\xe2\xe2\x72\xd5\xbc\xbc\xdc\xb2\xa0\xf8\x06\x66\xf3\xff\
\xa0\x82\x07\x56\xec\x22\xb7\x07\x40\x86\xc1\x2a\x28\x50\xf9\x00\
\xf2\xe9\xb7\x6f\x9f\x80\x21\xc1\x08\xf6\x3d\xa8\xf2\x01\x25\x52\
\x7e\x7e\x21\x50\x33\x0e\x68\xd6\x1f\xa8\xc5\x90\xb4\x04\x64\x33\
\x7e\xfc\xf8\xe6\xe7\xb5\x6b\x07\xf7\x7c\xff\xfe\x69\x0e\x40\x00\
\x31\x42\x2d\x01\x49\x4b\x01\xb1\x1c\x28\x46\xe8\xd0\x1a\x07\x95\
\x87\xc0\x2a\x94\xe1\x11\x40\x00\x31\x0e\x74\xdf\x10\x20\x80\x06\
\xbc\x63\x02\x10\x40\x03\xee\x00\x80\x00\x1a\x70\x07\x00\x04\x18\
\x00\x4e\x12\xc6\x99\x32\x89\xe5\xec\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
"
# Resource name table. Decoding the escaped bytes, the embedded entries
# spell "images" and "camera.png" in 16-bit characters, each preceded by
# a length and hash prefix (binary layout defined by Qt's resource
# system; consumed via qRegisterResourceData below). Machine-generated —
# do not hand-edit.
qt_resource_name = b"\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x0a\
\x0c\x91\x67\x27\
\x00\x63\
\x00\x61\x00\x6d\x00\x65\x00\x72\x00\x61\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
# Resource tree/structure table passed to qRegisterResourceData together
# with the name and payload blobs above (binary node layout defined by
# Qt's resource system). Machine-generated — do not hand-edit.
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
    """Register this module's embedded resource data with Qt.

    Hands the generated structure, name, and payload blobs to
    ``QtCore.qRegisterResourceData`` (format version 1), making the
    embedded files available via the ``:/`` resource paths.
    """
    QtCore.qRegisterResourceData(
        0x01, qt_resource_struct, qt_resource_name, qt_resource_data
    )
def qCleanupResources():
    """Unregister this module's embedded resource data from Qt.

    Mirror of :func:`qInitResources`: passes the same structure, name,
    and payload blobs (format version 1) to
    ``QtCore.qUnregisterResourceData``.
    """
    QtCore.qUnregisterResourceData(
        0x01, qt_resource_struct, qt_resource_name, qt_resource_data
    )
# Module-level side effect: resources are registered as soon as this
# generated module is imported.
qInitResources()
|
normal
|
{
"blob_id": "2a6b373c443a1bbafe644cb770bc163536dd5573",
"index": 3348,
"step-1": "<mask token>\n\n\ndef qInitResources():\n QtCore.qRegisterResourceData(1, qt_resource_struct, qt_resource_name,\n qt_resource_data)\n\n\ndef qCleanupResources():\n QtCore.qUnregisterResourceData(1, qt_resource_struct, qt_resource_name,\n qt_resource_data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef qInitResources():\n QtCore.qRegisterResourceData(1, qt_resource_struct, qt_resource_name,\n qt_resource_data)\n\n\ndef qCleanupResources():\n QtCore.qUnregisterResourceData(1, qt_resource_struct, qt_resource_name,\n qt_resource_data)\n\n\nqInitResources()\n",
"step-3": "<mask token>\nqt_resource_data = (\n b'\\x00\\x00\\x07\\xb6\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00 \\x00\\x00\\x00 \\x08\\x06\\x00\\x00\\x00szz\\xf4\\x00\\x00\\x00\\x04gAMA\\x00\\x00\\xaf\\xc87\\x05\\x8a\\xe9\\x00\\x00\\x00\\x19tEXtSoftware\\x00Adobe ImageReadyq\\xc9e<\\x00\\x00\\x07HIDATx\\xdab\\xfc\\xff\\xff?\\xc3@\\x02\\x80\\x00bb\\x18`\\x00\\x10@\\x03\\xee\\x00\\x80\\x00\\x1ap\\x07\\x00\\x04\\xd0\\x80;\\x00 \\x80X@\\x84\\xb1q*\\xc3\\x8f\\x1f?\\x99~\\xfe\\xfc\\xce\\xfd\\xff\\xff\\xbf\\xbfJJ\\xaa\\x86RR\\x02Y<<\\x8cf\\x7f\\xfe\\xfca~\\xf2\\xe4\\xed\\x8d\\xd3\\xa7\\xf7-\\xfb\\xf6\\xed\\xddv%%\\xc7\\x9f\\x0c\\x0c\\xff\\xa0\\xda\\x19!\\xbe`\\x82\\xf8\\x83\\x91\\x91\\x11\\x8e\\x99\\x98\\x18\\x19@\\xe9\\x1b\\xc4\\x86\\xc9\\x81\\xc4\\x98\\x98\\x98\\x19\\x7f\\xff\\xfe\\xf4\\xe5\\xcc\\x99\\xa5\\xe0\\xd4\\x0f\\x10@`\\x07\\xfc\\xfb\\xf7\\xdfNEE\\xa6AP\\x90\\x97\\x07\\x98+\\xferppjpq1\\n\\xb0\\xb11\\x81-\\xe1\\xe5\\xe5U\\x94\\x92\\x8au\\xff\\xf7\\xef\\xefe\\x06\\x86\\xff?A\\x86\\xb0\\xb3\\xb3\\x83\\x03\\x10d\\xfe\\xef\\xdf\\xbfA\\x06\\x83-\\x01z\\x00\\xcc\\x06;\\xe0\\xdf\\x7f\\x86\\xffp\\x87\\x81L\\x02:\\x80\\x99\\x99\\xf1\\xc5\\x8b\\x97\\xaf\\x1e<P\\xa9\\x7f\\xf3\\xe6\\xce\\x19\\x80\\x00\\x02;@MMvboo\\x9a\\x81\\xa0 \\x1f\\xd8\\xd5\\x9f?\\x7fg\\xf8\\xf8\\xf1+\\xc3\\xb7o\\xbf\\xc1|\\x16\\x16f\\x06VVV\\xa0G\\x99\\xf4A\\x86\\xb1\\xb1s0\\\\\\xbf|\\x9c\\xe1\\xf1\\x83;\\x0cj\\xdaF\\x0c\\xc2\"b@u\\x7f\\x80\\x1e\\xf9\\xc7\\xc0\\xcc\\xcc\\xcc\\xf0\\xf7/\\x13\\x03\\x07\\x07\\x1f\\x03;;P-\\x0b#\\x033(\\x10\\x18\\x99\\x18\\xbe|\\xfd\\xc1\\xf0\\xfd\\'+\\xd0\\xfc\\xbf\\x0c\\xf7\\xef_f\\x02: \\t 
\\x80\\xc0\\x0e\\x10\\x15\\xe5\\x96ea\\xf9\\xc7\\xf0\\xe5\\xcbg\\xb0\\xeb?}\\xfa\\xca\\xf0\\xf2\\xe5{\\x86\\x0f\\x1f\\xbe\\x83\\x83\\x8f\\x99\\x99\\x05\\x8cAr\\\\\\x9c\\\\\\x0cwo\\x9f`hYu\\x9cA\\xc9:\\x80\\xe1E\\xd3t\\x86%\\xfd\\xb9\\x0cK\\x96\\xaff\\xf8\\xf8\\xe1\\x03CEE%\\xd8\\xe7\\x97\\xaf^d\\x10\\x91\\x92e\\x10\\x92\\x94ax\\xf1\\x8d\\x91\\xe1\\xf9\\xd7\\xff\\x0c\\xaf\\xdf\\xfdd\\xe0zx\\x83ATP\\x11\\xe8@\\x0e\\x05\\xa0\\xd1\\n\\x00\\x01\\x04\\x8e\\xbc_\\xbf~\\xfd\\xfd\\xfd\\xfb/<\\xbeP\\xe3\\x0c\\x82A|ff&p\\xb4\\xbf|v\\x93\\xe1\\xfe\\'\\x1e\\x86\\xdd\\x8f\\xa5\\x18\\x189D\\x19\\x04\\xf8x\\x18V,_\\xc1\\xb0`\\xc1\"\\x86\\xef\\xdf\\x7f0(\\xab(3\\xd8X\\x9b1<\\xff\\xc6\\xc4p\\xe6\\xfeg\\x86\\xcb\\xf7\\xde0|\\xff\\xf2\\x9b\\x81\\xf9\\xe77\\x06\\x0e`\\xd4\\xfd\\xfe\\xf5\\x07\\x18m\\x7fA\\x96\\xb1\\x01\\x04\\x10\\x0b,1!\\xd9\\x8d\\x15@\\x1c\\xc7\\xc8\\xf0\\xf8\\xf1\\r\\x06w_o\\x06\\x0e\\xc1\\x13\\x0c\\x07\\x8fu1d\\x97\\x860\\xc8)k1,]\\xba\\x14h\\xf9w\\x06\\r\\ru`\\x82\\xfe\\r\\x8c2v\\x06\\x0b%\\x01\\x86_?~2\\\\\\xb9r\\x95A\\x98K\\x8d\\x81U\\x90\\x9f\\xe1\\x1d#;0z\\x7f\\xc2<\\xfb\\x1f \\x80X\\x88\\xcd. 
\\xdf\\xbfz\\xf5\\x88AL\\x8c\\x9fARR\\x9e!9^\\x99!;%\\x92\\x81\\x85\\x83\\x07,ogg\\x07W\\xfb\\xfb7$\\x97\\xf0\\xf0\\xf02\\xfcf\\xe7b004f\\xb8x\\xf1\\x1a\\x83\\xa4\\x9480=\\x81\\x92\\xe5\\x0f\\xb8\\x87\\x01\\x02\\x88\\x05\\xe1\\xbb\\xffx|\\xcf\\x04\\xf4\\xd5\\x0f\\xa0\\xaf\\xfe0\\xc8\\xc9)\\x83\\x83\\x99\\x81\\x81\\x95\\x81\\x81\\x9b\\x93\\x81\\r\\x18\\x9c_\\xbe|\\x02:\\xee\\x05X\\r\\x1f\\x1f?0MI\\x80\\x13\\xee\\xb7o?\\xc19\\x84\\x99\\x85\\x1dh\\xb9\\x08\\xc3\\xa3GW\\x18\\x04\\x04T\\x19\\x90\\xab\\x1f\\x80\\x00\"*\\x04@\\x89\\xef\\xc3\\x87\\x97\\x0c\\xb2\\xb2R\\xc0\\x14\\xfe\\x1f\\xecX\\x90\\xa3\\x81\\xd9\\x92\\xe1\\xde\\xbd\\x07\\x0c/^\\xbc\\x06\\n\\xb1\\x03-b\\x03&\\xde\\'\\xc0hz\\xc2\\xa0\\xa2\\xa2\\xca\\xc0\\xc5\\xc5\\x0fL_\\xa0\\xf8\\xfe\\xc3 ,,\\x0eN\\xd8?~|\\x87F9$\\x08\\x00\\x02\\x88\\x89P\\x81\\x08R\\xf8\\xf7\\xefo\\xa0A_\\x19\\xd8\\xd8\\xb8\\xc0\\x96B\\xa2\\x84\\x99\\xe1\\xcd\\x9b\\x97\\x0c\\xaf_\\xbfc\\xe0\\xe1\\x95dx\\xfd\\xe6#\\xc3\\xb9sg\\x1988\\x85\\x809\\x8e\\x87\\xe1\\xc6\\x8dk\\x0c\\xc0\\x82\\rZ6\\x00\\xcb\\x83\\xffL\\x0c\\xdc\\xdc\\xec\\xc0\\xd0z\\x0b\\x0c\\x1d\\x84\\xbf\\x01\\x02\\x88\\tb\\t\\xde\\xe4\\x07\\xf4\\xc1/\\x06NN\\xa0\\x0f\\x99Y\\xc1\\x86\\xc1|\\xff\\xfe\\xfd{\\x061q9\\x86S\\xa7\\x8e0$\\xa7\\x840\\x14\\x15\\xa5\\x02\\xb3a\\x16\\xb0\\xe0\\xe2\\x07F\\x17\\x17\\xc3\\xdb\\xb7\\xaf\\x80\\x96A<\\xf7\\xf7\\xef_\\xb0\\x19?~|\\x00G)\\x0c\\x00\\x04\\x10\\x11\\x0e`\\x00[\\n*\\xf9`m\\x07P\\xd1\\xfb\\xf3\\xe7\\x0fp\\xaa\\x11\\x13\\x17eX\\xb8`&\\xc3\\xe7O\\x9f@%*\\xc3\\x89\\xe3G\\x18\\xce\\x9c9\\xc6 
.!\\x05,p>\\xc2-\\x83\\x98\\xc1\\x01\\xe4\\xff\\x03\\xab\\x83\\x15\\xe3\\x00\\x01\\xc4\\x84\\\\\\xa6\\xe3\\x03\\x10M\\x08\\x07\\x81\\x1c\\xf1\\x0f\\xe8\\xab\\xff\\xc0zAPH\\x18E-\\xbf\\x00?\\xc3\\x1f`\\xb4\\xfd\\xfd\\xfb\\x0f)*\\x19\\xc0%\\xe5\\xbf\\x7f\\xa8\\xe6\\x02\\x04\\x10R.\\xc0W\\x060\\x83\\xf38\\xc21\\xff\\x80\\xe9\\x81\\x13\\x98\\xe8X\\x18^>\\x7f\\xcaP^Q\\xcd\\xf0\\x13\\x98\\xb8^<\\x7f\\xce\\xe0\\xe1\\xed\\xc3`nn\\xc9p\\xe3\\xfaE\\x06I\\tIh\\x94A\\xec\\x00%@&&V\\xb0\\xe3av\\x02\\x04\\x10\\x0b\\xa1(\\x00\\x19\\xc0\\xc6\\xc6\\x06\\xcc\\x05\\xff\\x80>\\xfa\\x05T\\xcb\\x0e\\x97\\x13\\x03\\xd6\\x01\\xf7\\xee\\xdfe\\x10\\x11\\x91`\\x989{>8{\\xf2\\xf0p3\\xdc\\xbds\\x1dX\\xda\\xfdd\\x90\\x90\\x90\\x02\\xc7=\\xac>\\xf9\\xf1\\xe3\\x17P^\\x84\\x01X\\xc3\\xc2\\xed\\x04\\x08 \"\\xb2!$\\xb899\\xf9\\x19\\xbe~\\xfd\\xc4\\xc0\\xcf/\\x08OT\\xc2\"\\xa2@\\xd7\\xffgx\\xf2\\xe41\\xc3\\xdb\\x9f\\x7f\\x80\\xc1\\xfe\\x87\\xe1\\xc5\\xb3?\\xc0\\xc4\\xca\\xc8`jj\\x01.\\r\\x7f\\xff\\xfe\\x01\\xcdI\\xbf\\x80i\\xe2\\'0\\'\\x08\\x00\\x13\\xefWx\\xb4\\x03\\x04\\x10QQ\\x00\\xb2\\x8c\\x8fO\\x88\\xe1\\xd9\\xb3{\\xc0RP\\x12\\xe8 H(\\x80|\"!!\\r,\\x1dE\\x81\\x86\\x7f\\x01\\xe7\\x16\\x16\\x16V\\xa0/\\xf9\\xc1\\xf1\\xff\\xfd\\xfbO\\xb0:66V\\x86\\xdb\\xb7o\\x03\\x1d&\\x08\\xae\\xd4\\x90\\x01@\\x00\\x11\\x99\\x08\\xff\\x835rr\\x8a\\x82\\xf37(8a\\x8e\\xfe\\x0b\\xf45\\x0b\\xb0\\xa4\\x13\\x11\\x11\\x01:N\\nH\\x8b\\x82\\x83\\x17\\xe4h\\x10`ee\\x01F\\xdfk`\\xf9\\xff\\x97AFF\\r\\xac\\x1e\\xb9\\xee\\x01\\x08 \\xa2\\xb2!,\\x14\\x84\\x84D\\x81\\x05\\x12+\\xc3\\xe5\\xcb\\xe7\\xc0z@i\\x03Q\\xd0 
\\xd4\\x81\\xf8\\xa0\\x82\\x8a\\x9d\\x9d\\x8d\\xe1\\xdd\\xbb\\xd7\\x0cw\\xee<\\x06VR\\xe6\\xf04\\x85\\x0c\\x00\\x02\\x88\\tZ\\xaa1\\x83*\\x1blU0r\\x13\\x0bT\\xf8HJ*\\x00\\x83\\x96\\x95\\xe1\\xc8\\x91\\xa3\\x0c\\xcf\\x9f?\\x01\\xfa\\x90\\x19\\\\\\xd8\\xc0\\x13\\x150t@\\x05\\x0e\\xa8\\x14\\xbcz\\xf520\\xe8\\x9f1\\xe8\\xe8X\\x03\\x8be\\x0epk\\tb\\x16\\x13\\xb8i\\x06R\\x0f\\x10@\\xe0\\x08y\\xf2\\xe4\\xcd\\xed\\x87\\x0f\\xdf\\t+*J\\x01\\xe3\\x15T\\x9b\\x81\\xe2\\x17\\x98\\xcf\\xff1\\xc1+#\\x18\\x06\\xb5\\x1b\\x84\\x85\\xe5\\x80)\\xfa\\x1b\\xb0\\x04\\xbc\\x07\\xacj\\xef\\x01C\\x86\\x17\\xe80q\\xb0\\x8f?\\x00\\x1b%\\xcf\\x9e\\x01\\xeb\\xff\\xef\\xff\\x80\\xe9F\\x8aA[[\\x05\\xec\\xf8_\\xbf\\x80%\\xc6?\\x90gX\\x80\\r\\x9e\\xcf\\xa0h\\x01\\xd5h\\xff\\x01\\x02\\x88\\x11\\x14$RRzv\\xe2\\xe2r\\xd5\\xbc\\xbc\\xdc\\xb2\\xa0\\xf8\\x06f\\xf3\\xff\\xa0\\x82\\x07V\\xec\"\\xb7\\x07@\\x86\\xc1*(P\\xf9\\x00\\xf2\\xe9\\xb7o\\x9f\\x80!\\xc1\\x08\\xf6=\\xa8\\xf2\\x01%R~~!P3\\x0eh\\xd6\\x1f\\xa8\\xc5\\x90\\xb4\\x04d3~\\xfc\\xf8\\xe6\\xe7\\xb5k\\x07\\xf7|\\xff\\xfei\\x0e@\\x001B-\\x01IK\\x01\\xb1\\x1c(F\\xe8\\xd0\\x1a\\x07\\x95\\x87\\xc0*\\x94\\xe1\\x11@\\x001\\x0et\\xdf\\x10 \\x80\\x06\\xbcc\\x02\\x10@\\x03\\xee\\x00\\x80\\x00\\x1ap\\x07\\x00\\x04\\x18\\x00N\\x12\\xc6\\x992\\x89\\xe5\\xec\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82'\n )\nqt_resource_name = (\n b\"\\x00\\x06\\x07\\x03}\\xc3\\x00i\\x00m\\x00a\\x00g\\x00e\\x00s\\x00\\n\\x0c\\x91g'\\x00c\\x00a\\x00m\\x00e\\x00r\\x00a\\x00.\\x00p\\x00n\\x00g\"\n )\nqt_resource_struct = (\n b'\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x12\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00'\n )\n\n\ndef qInitResources():\n QtCore.qRegisterResourceData(1, qt_resource_struct, qt_resource_name,\n qt_resource_data)\n\n\ndef qCleanupResources():\n QtCore.qUnregisterResourceData(1, qt_resource_struct, qt_resource_name,\n 
qt_resource_data)\n\n\nqInitResources()\n",
"step-4": "from PyQt4 import QtCore\nqt_resource_data = (\n b'\\x00\\x00\\x07\\xb6\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00 \\x00\\x00\\x00 \\x08\\x06\\x00\\x00\\x00szz\\xf4\\x00\\x00\\x00\\x04gAMA\\x00\\x00\\xaf\\xc87\\x05\\x8a\\xe9\\x00\\x00\\x00\\x19tEXtSoftware\\x00Adobe ImageReadyq\\xc9e<\\x00\\x00\\x07HIDATx\\xdab\\xfc\\xff\\xff?\\xc3@\\x02\\x80\\x00bb\\x18`\\x00\\x10@\\x03\\xee\\x00\\x80\\x00\\x1ap\\x07\\x00\\x04\\xd0\\x80;\\x00 \\x80X@\\x84\\xb1q*\\xc3\\x8f\\x1f?\\x99~\\xfe\\xfc\\xce\\xfd\\xff\\xff\\xbf\\xbfJJ\\xaa\\x86RR\\x02Y<<\\x8cf\\x7f\\xfe\\xfca~\\xf2\\xe4\\xed\\x8d\\xd3\\xa7\\xf7-\\xfb\\xf6\\xed\\xddv%%\\xc7\\x9f\\x0c\\x0c\\xff\\xa0\\xda\\x19!\\xbe`\\x82\\xf8\\x83\\x91\\x91\\x11\\x8e\\x99\\x98\\x18\\x19@\\xe9\\x1b\\xc4\\x86\\xc9\\x81\\xc4\\x98\\x98\\x98\\x19\\x7f\\xff\\xfe\\xf4\\xe5\\xcc\\x99\\xa5\\xe0\\xd4\\x0f\\x10@`\\x07\\xfc\\xfb\\xf7\\xdfNEE\\xa6AP\\x90\\x97\\x07\\x98+\\xferppjpq1\\n\\xb0\\xb11\\x81-\\xe1\\xe5\\xe5U\\x94\\x92\\x8au\\xff\\xf7\\xef\\xefe\\x06\\x86\\xff?A\\x86\\xb0\\xb3\\xb3\\x83\\x03\\x10d\\xfe\\xef\\xdf\\xbfA\\x06\\x83-\\x01z\\x00\\xcc\\x06;\\xe0\\xdf\\x7f\\x86\\xffp\\x87\\x81L\\x02:\\x80\\x99\\x99\\xf1\\xc5\\x8b\\x97\\xaf\\x1e<P\\xa9\\x7f\\xf3\\xe6\\xce\\x19\\x80\\x00\\x02;@MMvboo\\x9a\\x81\\xa0 \\x1f\\xd8\\xd5\\x9f?\\x7fg\\xf8\\xf8\\xf1+\\xc3\\xb7o\\xbf\\xc1|\\x16\\x16f\\x06VVV\\xa0G\\x99\\xf4A\\x86\\xb1\\xb1s0\\\\\\xbf|\\x9c\\xe1\\xf1\\x83;\\x0cj\\xdaF\\x0c\\xc2\"b@u\\x7f\\x80\\x1e\\xf9\\xc7\\xc0\\xcc\\xcc\\xcc\\xf0\\xf7/\\x13\\x03\\x07\\x07\\x1f\\x03;;P-\\x0b#\\x033(\\x10\\x18\\x99\\x18\\xbe|\\xfd\\xc1\\xf0\\xfd\\'+\\xd0\\xfc\\xbf\\x0c\\xf7\\xef_f\\x02: \\t 
\\x80\\xc0\\x0e\\x10\\x15\\xe5\\x96ea\\xf9\\xc7\\xf0\\xe5\\xcbg\\xb0\\xeb?}\\xfa\\xca\\xf0\\xf2\\xe5{\\x86\\x0f\\x1f\\xbe\\x83\\x83\\x8f\\x99\\x99\\x05\\x8cAr\\\\\\x9c\\\\\\x0cwo\\x9f`hYu\\x9cA\\xc9:\\x80\\xe1E\\xd3t\\x86%\\xfd\\xb9\\x0cK\\x96\\xaff\\xf8\\xf8\\xe1\\x03CEE%\\xd8\\xe7\\x97\\xaf^d\\x10\\x91\\x92e\\x10\\x92\\x94ax\\xf1\\x8d\\x91\\xe1\\xf9\\xd7\\xff\\x0c\\xaf\\xdf\\xfdd\\xe0zx\\x83ATP\\x11\\xe8@\\x0e\\x05\\xa0\\xd1\\n\\x00\\x01\\x04\\x8e\\xbc_\\xbf~\\xfd\\xfd\\xfd\\xfb/<\\xbeP\\xe3\\x0c\\x82A|ff&p\\xb4\\xbf|v\\x93\\xe1\\xfe\\'\\x1e\\x86\\xdd\\x8f\\xa5\\x18\\x189D\\x19\\x04\\xf8x\\x18V,_\\xc1\\xb0`\\xc1\"\\x86\\xef\\xdf\\x7f0(\\xab(3\\xd8X\\x9b1<\\xff\\xc6\\xc4p\\xe6\\xfeg\\x86\\xcb\\xf7\\xde0|\\xff\\xf2\\x9b\\x81\\xf9\\xe77\\x06\\x0e`\\xd4\\xfd\\xfe\\xf5\\x07\\x18m\\x7fA\\x96\\xb1\\x01\\x04\\x10\\x0b,1!\\xd9\\x8d\\x15@\\x1c\\xc7\\xc8\\xf0\\xf8\\xf1\\r\\x06w_o\\x06\\x0e\\xc1\\x13\\x0c\\x07\\x8fu1d\\x97\\x860\\xc8)k1,]\\xba\\x14h\\xf9w\\x06\\r\\ru`\\x82\\xfe\\r\\x8c2v\\x06\\x0b%\\x01\\x86_?~2\\\\\\xb9r\\x95A\\x98K\\x8d\\x81U\\x90\\x9f\\xe1\\x1d#;0z\\x7f\\xc2<\\xfb\\x1f \\x80X\\x88\\xcd. 
\\xdf\\xbfz\\xf5\\x88AL\\x8c\\x9fARR\\x9e!9^\\x99!;%\\x92\\x81\\x85\\x83\\x07,ogg\\x07W\\xfb\\xfb7$\\x97\\xf0\\xf0\\xf02\\xfcf\\xe7b004f\\xb8x\\xf1\\x1a\\x83\\xa4\\x9480=\\x81\\x92\\xe5\\x0f\\xb8\\x87\\x01\\x02\\x88\\x05\\xe1\\xbb\\xffx|\\xcf\\x04\\xf4\\xd5\\x0f\\xa0\\xaf\\xfe0\\xc8\\xc9)\\x83\\x83\\x99\\x81\\x81\\x95\\x81\\x81\\x9b\\x93\\x81\\r\\x18\\x9c_\\xbe|\\x02:\\xee\\x05X\\r\\x1f\\x1f?0MI\\x80\\x13\\xee\\xb7o?\\xc19\\x84\\x99\\x85\\x1dh\\xb9\\x08\\xc3\\xa3GW\\x18\\x04\\x04T\\x19\\x90\\xab\\x1f\\x80\\x00\"*\\x04@\\x89\\xef\\xc3\\x87\\x97\\x0c\\xb2\\xb2R\\xc0\\x14\\xfe\\x1f\\xecX\\x90\\xa3\\x81\\xd9\\x92\\xe1\\xde\\xbd\\x07\\x0c/^\\xbc\\x06\\n\\xb1\\x03-b\\x03&\\xde\\'\\xc0hz\\xc2\\xa0\\xa2\\xa2\\xca\\xc0\\xc5\\xc5\\x0fL_\\xa0\\xf8\\xfe\\xc3 ,,\\x0eN\\xd8?~|\\x87F9$\\x08\\x00\\x02\\x88\\x89P\\x81\\x08R\\xf8\\xf7\\xefo\\xa0A_\\x19\\xd8\\xd8\\xb8\\xc0\\x96B\\xa2\\x84\\x99\\xe1\\xcd\\x9b\\x97\\x0c\\xaf_\\xbfc\\xe0\\xe1\\x95dx\\xfd\\xe6#\\xc3\\xb9sg\\x1988\\x85\\x809\\x8e\\x87\\xe1\\xc6\\x8dk\\x0c\\xc0\\x82\\rZ6\\x00\\xcb\\x83\\xffL\\x0c\\xdc\\xdc\\xec\\xc0\\xd0z\\x0b\\x0c\\x1d\\x84\\xbf\\x01\\x02\\x88\\tb\\t\\xde\\xe4\\x07\\xf4\\xc1/\\x06NN\\xa0\\x0f\\x99Y\\xc1\\x86\\xc1|\\xff\\xfe\\xfd{\\x061q9\\x86S\\xa7\\x8e0$\\xa7\\x840\\x14\\x15\\xa5\\x02\\xb3a\\x16\\xb0\\xe0\\xe2\\x07F\\x17\\x17\\xc3\\xdb\\xb7\\xaf\\x80\\x96A<\\xf7\\xf7\\xef_\\xb0\\x19?~|\\x00G)\\x0c\\x00\\x04\\x10\\x11\\x0e`\\x00[\\n*\\xf9`m\\x07P\\xd1\\xfb\\xf3\\xe7\\x0fp\\xaa\\x11\\x13\\x17eX\\xb8`&\\xc3\\xe7O\\x9f@%*\\xc3\\x89\\xe3G\\x18\\xce\\x9c9\\xc6 
.!\\x05,p>\\xc2-\\x83\\x98\\xc1\\x01\\xe4\\xff\\x03\\xab\\x83\\x15\\xe3\\x00\\x01\\xc4\\x84\\\\\\xa6\\xe3\\x03\\x10M\\x08\\x07\\x81\\x1c\\xf1\\x0f\\xe8\\xab\\xff\\xc0zAPH\\x18E-\\xbf\\x00?\\xc3\\x1f`\\xb4\\xfd\\xfd\\xfb\\x0f)*\\x19\\xc0%\\xe5\\xbf\\x7f\\xa8\\xe6\\x02\\x04\\x10R.\\xc0W\\x060\\x83\\xf38\\xc21\\xff\\x80\\xe9\\x81\\x13\\x98\\xe8X\\x18^>\\x7f\\xcaP^Q\\xcd\\xf0\\x13\\x98\\xb8^<\\x7f\\xce\\xe0\\xe1\\xed\\xc3`nn\\xc9p\\xe3\\xfaE\\x06I\\tIh\\x94A\\xec\\x00%@&&V\\xb0\\xe3av\\x02\\x04\\x10\\x0b\\xa1(\\x00\\x19\\xc0\\xc6\\xc6\\x06\\xcc\\x05\\xff\\x80>\\xfa\\x05T\\xcb\\x0e\\x97\\x13\\x03\\xd6\\x01\\xf7\\xee\\xdfe\\x10\\x11\\x91`\\x989{>8{\\xf2\\xf0p3\\xdc\\xbds\\x1dX\\xda\\xfdd\\x90\\x90\\x90\\x02\\xc7=\\xac>\\xf9\\xf1\\xe3\\x17P^\\x84\\x01X\\xc3\\xc2\\xed\\x04\\x08 \"\\xb2!$\\xb899\\xf9\\x19\\xbe~\\xfd\\xc4\\xc0\\xcf/\\x08OT\\xc2\"\\xa2@\\xd7\\xffgx\\xf2\\xe41\\xc3\\xdb\\x9f\\x7f\\x80\\xc1\\xfe\\x87\\xe1\\xc5\\xb3?\\xc0\\xc4\\xca\\xc8`jj\\x01.\\r\\x7f\\xff\\xfe\\x01\\xcdI\\xbf\\x80i\\xe2\\'0\\'\\x08\\x00\\x13\\xefWx\\xb4\\x03\\x04\\x10QQ\\x00\\xb2\\x8c\\x8fO\\x88\\xe1\\xd9\\xb3{\\xc0RP\\x12\\xe8 H(\\x80|\"!!\\r,\\x1dE\\x81\\x86\\x7f\\x01\\xe7\\x16\\x16\\x16V\\xa0/\\xf9\\xc1\\xf1\\xff\\xfd\\xfbO\\xb0:66V\\x86\\xdb\\xb7o\\x03\\x1d&\\x08\\xae\\xd4\\x90\\x01@\\x00\\x11\\x99\\x08\\xff\\x835rr\\x8a\\x82\\xf37(8a\\x8e\\xfe\\x0b\\xf45\\x0b\\xb0\\xa4\\x13\\x11\\x11\\x01:N\\nH\\x8b\\x82\\x83\\x17\\xe4h\\x10`ee\\x01F\\xdfk`\\xf9\\xff\\x97AFF\\r\\xac\\x1e\\xb9\\xee\\x01\\x08 \\xa2\\xb2!,\\x14\\x84\\x84D\\x81\\x05\\x12+\\xc3\\xe5\\xcb\\xe7\\xc0z@i\\x03Q\\xd0 
\\xd4\\x81\\xf8\\xa0\\x82\\x8a\\x9d\\x9d\\x8d\\xe1\\xdd\\xbb\\xd7\\x0cw\\xee<\\x06VR\\xe6\\xf04\\x85\\x0c\\x00\\x02\\x88\\tZ\\xaa1\\x83*\\x1blU0r\\x13\\x0bT\\xf8HJ*\\x00\\x83\\x96\\x95\\xe1\\xc8\\x91\\xa3\\x0c\\xcf\\x9f?\\x01\\xfa\\x90\\x19\\\\\\xd8\\xc0\\x13\\x150t@\\x05\\x0e\\xa8\\x14\\xbcz\\xf520\\xe8\\x9f1\\xe8\\xe8X\\x03\\x8be\\x0epk\\tb\\x16\\x13\\xb8i\\x06R\\x0f\\x10@\\xe0\\x08y\\xf2\\xe4\\xcd\\xed\\x87\\x0f\\xdf\\t+*J\\x01\\xe3\\x15T\\x9b\\x81\\xe2\\x17\\x98\\xcf\\xff1\\xc1+#\\x18\\x06\\xb5\\x1b\\x84\\x85\\xe5\\x80)\\xfa\\x1b\\xb0\\x04\\xbc\\x07\\xacj\\xef\\x01C\\x86\\x17\\xe80q\\xb0\\x8f?\\x00\\x1b%\\xcf\\x9e\\x01\\xeb\\xff\\xef\\xff\\x80\\xe9F\\x8aA[[\\x05\\xec\\xf8_\\xbf\\x80%\\xc6?\\x90gX\\x80\\r\\x9e\\xcf\\xa0h\\x01\\xd5h\\xff\\x01\\x02\\x88\\x11\\x14$RRzv\\xe2\\xe2r\\xd5\\xbc\\xbc\\xdc\\xb2\\xa0\\xf8\\x06f\\xf3\\xff\\xa0\\x82\\x07V\\xec\"\\xb7\\x07@\\x86\\xc1*(P\\xf9\\x00\\xf2\\xe9\\xb7o\\x9f\\x80!\\xc1\\x08\\xf6=\\xa8\\xf2\\x01%R~~!P3\\x0eh\\xd6\\x1f\\xa8\\xc5\\x90\\xb4\\x04d3~\\xfc\\xf8\\xe6\\xe7\\xb5k\\x07\\xf7|\\xff\\xfei\\x0e@\\x001B-\\x01IK\\x01\\xb1\\x1c(F\\xe8\\xd0\\x1a\\x07\\x95\\x87\\xc0*\\x94\\xe1\\x11@\\x001\\x0et\\xdf\\x10 \\x80\\x06\\xbcc\\x02\\x10@\\x03\\xee\\x00\\x80\\x00\\x1ap\\x07\\x00\\x04\\x18\\x00N\\x12\\xc6\\x992\\x89\\xe5\\xec\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82'\n )\nqt_resource_name = (\n b\"\\x00\\x06\\x07\\x03}\\xc3\\x00i\\x00m\\x00a\\x00g\\x00e\\x00s\\x00\\n\\x0c\\x91g'\\x00c\\x00a\\x00m\\x00e\\x00r\\x00a\\x00.\\x00p\\x00n\\x00g\"\n )\nqt_resource_struct = (\n b'\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x12\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00'\n )\n\n\ndef qInitResources():\n QtCore.qRegisterResourceData(1, qt_resource_struct, qt_resource_name,\n qt_resource_data)\n\n\ndef qCleanupResources():\n QtCore.qUnregisterResourceData(1, qt_resource_struct, qt_resource_name,\n 
qt_resource_data)\n\n\nqInitResources()\n",
"step-5": "###############################################################################\n##\n## Copyright (C) 2011-2014, NYU-Poly.\n## Copyright (C) 2006-2011, University of Utah. \n## All rights reserved.\n## Contact: contact@vistrails.org\n##\n## This file is part of VisTrails.\n##\n## \"Redistribution and use in source and binary forms, with or without \n## modification, are permitted provided that the following conditions are met:\n##\n## - Redistributions of source code must retain the above copyright notice, \n## this list of conditions and the following disclaimer.\n## - Redistributions in binary form must reproduce the above copyright \n## notice, this list of conditions and the following disclaimer in the \n## documentation and/or other materials provided with the distribution.\n## - Neither the name of the University of Utah nor the names of its \n## contributors may be used to endorse or promote products derived from \n## this software without specific prior written permission.\n##\n## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \n## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, \n## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR \n## PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR \n## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, \n## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, \n## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; \n## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, \n## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR \n## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF \n## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\"\n##\n###############################################################################\n# -*- coding: utf-8 -*-\n\n# Resource object code\n#\n# Created: Mon Jul 19 16:02:11 2010\n# by: The Resource Compiler for PyQt (Qt v4.6.3)\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore\n\nqt_resource_data = b\"\\\n\\x00\\x00\\x07\\xb6\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\x20\\x00\\x00\\x00\\x20\\x08\\x06\\x00\\x00\\x00\\x73\\x7a\\x7a\\xf4\\\n\\x00\\x00\\x00\\x04\\x67\\x41\\x4d\\x41\\x00\\x00\\xaf\\xc8\\x37\\x05\\x8a\\xe9\\\n\\x00\\x00\\x00\\x19\\x74\\x45\\x58\\x74\\x53\\x6f\\x66\\x74\\x77\\x61\\x72\\x65\\\n\\x00\\x41\\x64\\x6f\\x62\\x65\\x20\\x49\\x6d\\x61\\x67\\x65\\x52\\x65\\x61\\x64\\\n\\x79\\x71\\xc9\\x65\\x3c\\x00\\x00\\x07\\x48\\x49\\x44\\x41\\x54\\x78\\xda\\x62\\\n\\xfc\\xff\\xff\\x3f\\xc3\\x40\\x02\\x80\\x00\\x62\\x62\\x18\\x60\\x00\\x10\\x40\\\n\\x03\\xee\\x00\\x80\\x00\\x1a\\x70\\x07\\x00\\x04\\xd0\\x80\\x3b\\x00\\x20\\x80\\\n\\x58\\x40\\x84\\xb1\\x71\\x2a\\xc3\\x8f\\x1f\\x3f\\x99\\x7e\\xfe\\xfc\\xce\\xfd\\\n\\xff\\xff\\xbf\\xbf\\x4a\\x4a\\xaa\\x86\\x52\\x52\\x02\\x59\\x3c\\x3c\\x8c\\x66\\\n\\x7f\\xfe\\xfc\\x61\\x7e\\xf2\\xe4\\xed\\x8d\\xd3\\xa7\\xf7\\x2d\\xfb\\xf6\\xed\\\n\\xdd\\x76\\x25\\x25\\xc7\\x9f\\x0c\\x0c\\xff\\xa0\\xda\\x19\\x21\\xbe\\x60\\x82\\\n\\xf8\\x83\\x91\\x91\\x11\\x8e\\x99\\x98\\x18\\x19\\x40\\xe9\\x1b
\\xc4\\x86\\xc9\\\n\\x81\\xc4\\x98\\x98\\x98\\x19\\x7f\\xff\\xfe\\xf4\\xe5\\xcc\\x99\\xa5\\xe0\\xd4\\\n\\x0f\\x10\\x40\\x60\\x07\\xfc\\xfb\\xf7\\xdf\\x4e\\x45\\x45\\xa6\\x41\\x50\\x90\\\n\\x97\\x07\\x98\\x2b\\xfe\\x72\\x70\\x70\\x6a\\x70\\x71\\x31\\x0a\\xb0\\xb1\\x31\\\n\\x81\\x2d\\xe1\\xe5\\xe5\\x55\\x94\\x92\\x8a\\x75\\xff\\xf7\\xef\\xef\\x65\\x06\\\n\\x86\\xff\\x3f\\x41\\x86\\xb0\\xb3\\xb3\\x83\\x03\\x10\\x64\\xfe\\xef\\xdf\\xbf\\\n\\x41\\x06\\x83\\x2d\\x01\\x7a\\x00\\xcc\\x06\\x3b\\xe0\\xdf\\x7f\\x86\\xff\\x70\\\n\\x87\\x81\\x4c\\x02\\x3a\\x80\\x99\\x99\\xf1\\xc5\\x8b\\x97\\xaf\\x1e\\x3c\\x50\\\n\\xa9\\x7f\\xf3\\xe6\\xce\\x19\\x80\\x00\\x02\\x3b\\x40\\x4d\\x4d\\x76\\x62\\x6f\\\n\\x6f\\x9a\\x81\\xa0\\x20\\x1f\\xd8\\xd5\\x9f\\x3f\\x7f\\x67\\xf8\\xf8\\xf1\\x2b\\\n\\xc3\\xb7\\x6f\\xbf\\xc1\\x7c\\x16\\x16\\x66\\x06\\x56\\x56\\x56\\xa0\\x47\\x99\\\n\\xf4\\x41\\x86\\xb1\\xb1\\x73\\x30\\x5c\\xbf\\x7c\\x9c\\xe1\\xf1\\x83\\x3b\\x0c\\\n\\x6a\\xda\\x46\\x0c\\xc2\\x22\\x62\\x40\\x75\\x7f\\x80\\x1e\\xf9\\xc7\\xc0\\xcc\\\n\\xcc\\xcc\\xf0\\xf7\\x2f\\x13\\x03\\x07\\x07\\x1f\\x03\\x3b\\x3b\\x50\\x2d\\x0b\\\n\\x23\\x03\\x33\\x28\\x10\\x18\\x99\\x18\\xbe\\x7c\\xfd\\xc1\\xf0\\xfd\\x27\\x2b\\\n\\xd0\\xfc\\xbf\\x0c\\xf7\\xef\\x5f\\x66\\x02\\x3a\\x20\\x09\\x20\\x80\\xc0\\x0e\\\n\\x10\\x15\\xe5\\x96\\x65\\x61\\xf9\\xc7\\xf0\\xe5\\xcb\\x67\\xb0\\xeb\\x3f\\x7d\\\n\\xfa\\xca\\xf0\\xf2\\xe5\\x7b\\x86\\x0f\\x1f\\xbe\\x83\\x83\\x8f\\x99\\x99\\x05\\\n\\x8c\\x41\\x72\\x5c\\x9c\\x5c\\x0c\\x77\\x6f\\x9f\\x60\\x68\\x59\\x75\\x9c\\x41\\\n\\xc9\\x3a\\x80\\xe1\\x45\\xd3\\x74\\x86\\x25\\xfd\\xb9\\x0c\\x4b\\x96\\xaf\\x66\\\n\\xf8\\xf8\\xe1\\x03\\x43\\x45\\x45\\x25\\xd8\\xe7\\x97\\xaf\\x5e\\x64\\x10\\x91\\\n\\x92\\x65\\x10\\x92\\x94\\x61\\x78\\xf1\\x8d\\x91\\xe1\\xf9\\xd7\\xff\\x0c\\xaf\\\n\\xdf\\xfd\\x64\\xe0\\x7a\\x78\\x83\\x41\\x54\\x50\\x11\\xe8\\x40\\x0e\\x05\\xa0\\\n\\xd1\\x0a\\x00\\x01\\x04\\x8e\\xbc\\x5f\\xbf\\x7e\\xfd\\xfd\\xfd\\xfb\\x2f\\x3c\\\n\\xbe\\x50\\xe3\\x0c\\x82\\x41\\x7c\\x66\\x66\\x2
6\\x70\\xb4\\xbf\\x7c\\x76\\x93\\\n\\xe1\\xfe\\x27\\x1e\\x86\\xdd\\x8f\\xa5\\x18\\x18\\x39\\x44\\x19\\x04\\xf8\\x78\\\n\\x18\\x56\\x2c\\x5f\\xc1\\xb0\\x60\\xc1\\x22\\x86\\xef\\xdf\\x7f\\x30\\x28\\xab\\\n\\x28\\x33\\xd8\\x58\\x9b\\x31\\x3c\\xff\\xc6\\xc4\\x70\\xe6\\xfe\\x67\\x86\\xcb\\\n\\xf7\\xde\\x30\\x7c\\xff\\xf2\\x9b\\x81\\xf9\\xe7\\x37\\x06\\x0e\\x60\\xd4\\xfd\\\n\\xfe\\xf5\\x07\\x18\\x6d\\x7f\\x41\\x96\\xb1\\x01\\x04\\x10\\x0b\\x2c\\x31\\x21\\\n\\xd9\\x8d\\x15\\x40\\x1c\\xc7\\xc8\\xf0\\xf8\\xf1\\x0d\\x06\\x77\\x5f\\x6f\\x06\\\n\\x0e\\xc1\\x13\\x0c\\x07\\x8f\\x75\\x31\\x64\\x97\\x86\\x30\\xc8\\x29\\x6b\\x31\\\n\\x2c\\x5d\\xba\\x14\\x68\\xf9\\x77\\x06\\x0d\\x0d\\x75\\x60\\x82\\xfe\\x0d\\x8c\\\n\\x32\\x76\\x06\\x0b\\x25\\x01\\x86\\x5f\\x3f\\x7e\\x32\\x5c\\xb9\\x72\\x95\\x41\\\n\\x98\\x4b\\x8d\\x81\\x55\\x90\\x9f\\xe1\\x1d\\x23\\x3b\\x30\\x7a\\x7f\\xc2\\x3c\\\n\\xfb\\x1f\\x20\\x80\\x58\\x88\\xcd\\x2e\\x20\\xdf\\xbf\\x7a\\xf5\\x88\\x41\\x4c\\\n\\x8c\\x9f\\x41\\x52\\x52\\x9e\\x21\\x39\\x5e\\x99\\x21\\x3b\\x25\\x92\\x81\\x85\\\n\\x83\\x07\\x2c\\x6f\\x67\\x67\\x07\\x57\\xfb\\xfb\\x37\\x24\\x97\\xf0\\xf0\\xf0\\\n\\x32\\xfc\\x66\\xe7\\x62\\x30\\x30\\x34\\x66\\xb8\\x78\\xf1\\x1a\\x83\\xa4\\x94\\\n\\x38\\x30\\x3d\\x81\\x92\\xe5\\x0f\\xb8\\x87\\x01\\x02\\x88\\x05\\xe1\\xbb\\xff\\\n\\x78\\x7c\\xcf\\x04\\xf4\\xd5\\x0f\\xa0\\xaf\\xfe\\x30\\xc8\\xc9\\x29\\x83\\x83\\\n\\x99\\x81\\x81\\x95\\x81\\x81\\x9b\\x93\\x81\\x0d\\x18\\x9c\\x5f\\xbe\\x7c\\x02\\\n\\x3a\\xee\\x05\\x58\\x0d\\x1f\\x1f\\x3f\\x30\\x4d\\x49\\x80\\x13\\xee\\xb7\\x6f\\\n\\x3f\\xc1\\x39\\x84\\x99\\x85\\x1d\\x68\\xb9\\x08\\xc3\\xa3\\x47\\x57\\x18\\x04\\\n\\x04\\x54\\x19\\x90\\xab\\x1f\\x80\\x00\\x22\\x2a\\x04\\x40\\x89\\xef\\xc3\\x87\\\n\\x97\\x0c\\xb2\\xb2\\x52\\xc0\\x14\\xfe\\x1f\\xec\\x58\\x90\\xa3\\x81\\xd9\\x92\\\n\\xe1\\xde\\xbd\\x07\\x0c\\x2f\\x5e\\xbc\\x06\\x0a\\xb1\\x03\\x2d\\x62\\x03\\x26\\\n\\xde\\x27\\xc0\\x68\\x7a\\xc2\\xa0\\xa2\\xa2\\xca\\xc0\\xc5\\xc5\\x0f\\x4c\\x5f\\\n\\xa0\\xf8\\xfe\\xc3\\x20\\x2c\\x
2c\\x0e\\x4e\\xd8\\x3f\\x7e\\x7c\\x87\\x46\\x39\\\n\\x24\\x08\\x00\\x02\\x88\\x89\\x50\\x81\\x08\\x52\\xf8\\xf7\\xef\\x6f\\xa0\\x41\\\n\\x5f\\x19\\xd8\\xd8\\xb8\\xc0\\x96\\x42\\xa2\\x84\\x99\\xe1\\xcd\\x9b\\x97\\x0c\\\n\\xaf\\x5f\\xbf\\x63\\xe0\\xe1\\x95\\x64\\x78\\xfd\\xe6\\x23\\xc3\\xb9\\x73\\x67\\\n\\x19\\x38\\x38\\x85\\x80\\x39\\x8e\\x87\\xe1\\xc6\\x8d\\x6b\\x0c\\xc0\\x82\\x0d\\\n\\x5a\\x36\\x00\\xcb\\x83\\xff\\x4c\\x0c\\xdc\\xdc\\xec\\xc0\\xd0\\x7a\\x0b\\x0c\\\n\\x1d\\x84\\xbf\\x01\\x02\\x88\\x09\\x62\\x09\\xde\\xe4\\x07\\xf4\\xc1\\x2f\\x06\\\n\\x4e\\x4e\\xa0\\x0f\\x99\\x59\\xc1\\x86\\xc1\\x7c\\xff\\xfe\\xfd\\x7b\\x06\\x31\\\n\\x71\\x39\\x86\\x53\\xa7\\x8e\\x30\\x24\\xa7\\x84\\x30\\x14\\x15\\xa5\\x02\\xb3\\\n\\x61\\x16\\xb0\\xe0\\xe2\\x07\\x46\\x17\\x17\\xc3\\xdb\\xb7\\xaf\\x80\\x96\\x41\\\n\\x3c\\xf7\\xf7\\xef\\x5f\\xb0\\x19\\x3f\\x7e\\x7c\\x00\\x47\\x29\\x0c\\x00\\x04\\\n\\x10\\x11\\x0e\\x60\\x00\\x5b\\x0a\\x2a\\xf9\\x60\\x6d\\x07\\x50\\xd1\\xfb\\xf3\\\n\\xe7\\x0f\\x70\\xaa\\x11\\x13\\x17\\x65\\x58\\xb8\\x60\\x26\\xc3\\xe7\\x4f\\x9f\\\n\\x40\\x25\\x2a\\xc3\\x89\\xe3\\x47\\x18\\xce\\x9c\\x39\\xc6\\x20\\x2e\\x21\\x05\\\n\\x2c\\x70\\x3e\\xc2\\x2d\\x83\\x98\\xc1\\x01\\xe4\\xff\\x03\\xab\\x83\\x15\\xe3\\\n\\x00\\x01\\xc4\\x84\\x5c\\xa6\\xe3\\x03\\x10\\x4d\\x08\\x07\\x81\\x1c\\xf1\\x0f\\\n\\xe8\\xab\\xff\\xc0\\x7a\\x41\\x50\\x48\\x18\\x45\\x2d\\xbf\\x00\\x3f\\xc3\\x1f\\\n\\x60\\xb4\\xfd\\xfd\\xfb\\x0f\\x29\\x2a\\x19\\xc0\\x25\\xe5\\xbf\\x7f\\xa8\\xe6\\\n\\x02\\x04\\x10\\x52\\x2e\\xc0\\x57\\x06\\x30\\x83\\xf3\\x38\\xc2\\x31\\xff\\x80\\\n\\xe9\\x81\\x13\\x98\\xe8\\x58\\x18\\x5e\\x3e\\x7f\\xca\\x50\\x5e\\x51\\xcd\\xf0\\\n\\x13\\x98\\xb8\\x5e\\x3c\\x7f\\xce\\xe0\\xe1\\xed\\xc3\\x60\\x6e\\x6e\\xc9\\x70\\\n\\xe3\\xfa\\x45\\x06\\x49\\x09\\x49\\x68\\x94\\x41\\xec\\x00\\x25\\x40\\x26\\x26\\\n\\x56\\xb0\\xe3\\x61\\x76\\x02\\x04\\x10\\x0b\\xa1\\x28\\x00\\x19\\xc0\\xc6\\xc6\\\n\\x06\\xcc\\x05\\xff\\x80\\x3e\\xfa\\x05\\x54\\xcb\\x0e\\x97\\x13\\x03\\xd6\\x01\\\n\\xf7\\xee\\xdf\\
x65\\x10\\x11\\x91\\x60\\x98\\x39\\x7b\\x3e\\x38\\x7b\\xf2\\xf0\\\n\\x70\\x33\\xdc\\xbd\\x73\\x1d\\x58\\xda\\xfd\\x64\\x90\\x90\\x90\\x02\\xc7\\x3d\\\n\\xac\\x3e\\xf9\\xf1\\xe3\\x17\\x50\\x5e\\x84\\x01\\x58\\xc3\\xc2\\xed\\x04\\x08\\\n\\x20\\x22\\xb2\\x21\\x24\\xb8\\x39\\x39\\xf9\\x19\\xbe\\x7e\\xfd\\xc4\\xc0\\xcf\\\n\\x2f\\x08\\x4f\\x54\\xc2\\x22\\xa2\\x40\\xd7\\xff\\x67\\x78\\xf2\\xe4\\x31\\xc3\\\n\\xdb\\x9f\\x7f\\x80\\xc1\\xfe\\x87\\xe1\\xc5\\xb3\\x3f\\xc0\\xc4\\xca\\xc8\\x60\\\n\\x6a\\x6a\\x01\\x2e\\x0d\\x7f\\xff\\xfe\\x01\\xcd\\x49\\xbf\\x80\\x69\\xe2\\x27\\\n\\x30\\x27\\x08\\x00\\x13\\xef\\x57\\x78\\xb4\\x03\\x04\\x10\\x51\\x51\\x00\\xb2\\\n\\x8c\\x8f\\x4f\\x88\\xe1\\xd9\\xb3\\x7b\\xc0\\x52\\x50\\x12\\xe8\\x20\\x48\\x28\\\n\\x80\\x7c\\x22\\x21\\x21\\x0d\\x2c\\x1d\\x45\\x81\\x86\\x7f\\x01\\xe7\\x16\\x16\\\n\\x16\\x56\\xa0\\x2f\\xf9\\xc1\\xf1\\xff\\xfd\\xfb\\x4f\\xb0\\x3a\\x36\\x36\\x56\\\n\\x86\\xdb\\xb7\\x6f\\x03\\x1d\\x26\\x08\\xae\\xd4\\x90\\x01\\x40\\x00\\x11\\x99\\\n\\x08\\xff\\x83\\x35\\x72\\x72\\x8a\\x82\\xf3\\x37\\x28\\x38\\x61\\x8e\\xfe\\x0b\\\n\\xf4\\x35\\x0b\\xb0\\xa4\\x13\\x11\\x11\\x01\\x3a\\x4e\\x0a\\x48\\x8b\\x82\\x83\\\n\\x17\\xe4\\x68\\x10\\x60\\x65\\x65\\x01\\x46\\xdf\\x6b\\x60\\xf9\\xff\\x97\\x41\\\n\\x46\\x46\\x0d\\xac\\x1e\\xb9\\xee\\x01\\x08\\x20\\xa2\\xb2\\x21\\x2c\\x14\\x84\\\n\\x84\\x44\\x81\\x05\\x12\\x2b\\xc3\\xe5\\xcb\\xe7\\xc0\\x7a\\x40\\x69\\x03\\x51\\\n\\xd0\\x20\\xd4\\x81\\xf8\\xa0\\x82\\x8a\\x9d\\x9d\\x8d\\xe1\\xdd\\xbb\\xd7\\x0c\\\n\\x77\\xee\\x3c\\x06\\x56\\x52\\xe6\\xf0\\x34\\x85\\x0c\\x00\\x02\\x88\\x09\\x5a\\\n\\xaa\\x31\\x83\\x2a\\x1b\\x6c\\x55\\x30\\x72\\x13\\x0b\\x54\\xf8\\x48\\x4a\\x2a\\\n\\x00\\x83\\x96\\x95\\xe1\\xc8\\x91\\xa3\\x0c\\xcf\\x9f\\x3f\\x01\\xfa\\x90\\x19\\\n\\x5c\\xd8\\xc0\\x13\\x15\\x30\\x74\\x40\\x05\\x0e\\xa8\\x14\\xbc\\x7a\\xf5\\x32\\\n\\x30\\xe8\\x9f\\x31\\xe8\\xe8\\x58\\x03\\x8b\\x65\\x0e\\x70\\x6b\\x09\\x62\\x16\\\n\\x13\\xb8\\x69\\x06\\x52\\x0f\\x10\\x40\\xe0\\x08\\x79\\xf2\\xe4\\xcd\\xed\\x87\\\n\
\x0f\\xdf\\x09\\x2b\\x2a\\x4a\\x01\\xe3\\x15\\x54\\x9b\\x81\\xe2\\x17\\x98\\xcf\\\n\\xff\\x31\\xc1\\x2b\\x23\\x18\\x06\\xb5\\x1b\\x84\\x85\\xe5\\x80\\x29\\xfa\\x1b\\\n\\xb0\\x04\\xbc\\x07\\xac\\x6a\\xef\\x01\\x43\\x86\\x17\\xe8\\x30\\x71\\xb0\\x8f\\\n\\x3f\\x00\\x1b\\x25\\xcf\\x9e\\x01\\xeb\\xff\\xef\\xff\\x80\\xe9\\x46\\x8a\\x41\\\n\\x5b\\x5b\\x05\\xec\\xf8\\x5f\\xbf\\x80\\x25\\xc6\\x3f\\x90\\x67\\x58\\x80\\x0d\\\n\\x9e\\xcf\\xa0\\x68\\x01\\xd5\\x68\\xff\\x01\\x02\\x88\\x11\\x14\\x24\\x52\\x52\\\n\\x7a\\x76\\xe2\\xe2\\x72\\xd5\\xbc\\xbc\\xdc\\xb2\\xa0\\xf8\\x06\\x66\\xf3\\xff\\\n\\xa0\\x82\\x07\\x56\\xec\\x22\\xb7\\x07\\x40\\x86\\xc1\\x2a\\x28\\x50\\xf9\\x00\\\n\\xf2\\xe9\\xb7\\x6f\\x9f\\x80\\x21\\xc1\\x08\\xf6\\x3d\\xa8\\xf2\\x01\\x25\\x52\\\n\\x7e\\x7e\\x21\\x50\\x33\\x0e\\x68\\xd6\\x1f\\xa8\\xc5\\x90\\xb4\\x04\\x64\\x33\\\n\\x7e\\xfc\\xf8\\xe6\\xe7\\xb5\\x6b\\x07\\xf7\\x7c\\xff\\xfe\\x69\\x0e\\x40\\x00\\\n\\x31\\x42\\x2d\\x01\\x49\\x4b\\x01\\xb1\\x1c\\x28\\x46\\xe8\\xd0\\x1a\\x07\\x95\\\n\\x87\\xc0\\x2a\\x94\\xe1\\x11\\x40\\x00\\x31\\x0e\\x74\\xdf\\x10\\x20\\x80\\x06\\\n\\xbc\\x63\\x02\\x10\\x40\\x03\\xee\\x00\\x80\\x00\\x1a\\x70\\x07\\x00\\x04\\x18\\\n\\x00\\x4e\\x12\\xc6\\x99\\x32\\x89\\xe5\\xec\\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\\n\\x44\\xae\\x42\\x60\\x82\\\n\"\n\nqt_resource_name = b\"\\\n\\x00\\x06\\\n\\x07\\x03\\x7d\\xc3\\\n\\x00\\x69\\\n\\x00\\x6d\\x00\\x61\\x00\\x67\\x00\\x65\\x00\\x73\\\n\\x00\\x0a\\\n\\x0c\\x91\\x67\\x27\\\n\\x00\\x63\\\n\\x00\\x61\\x00\\x6d\\x00\\x65\\x00\\x72\\x00\\x61\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\"\n\nqt_resource_struct = b\"\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\\n\\x00\\x00\\x00\\x12\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\\n\"\n\ndef qInitResources():\n QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)\n\ndef qCleanupResources():\n 
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)\n\nqInitResources()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
tn=int(input())
for ti in range(tn):
#ans = work()
rn,cn = [int(x) for x in input().split()]
evenRow='-'.join(['+']*(cn+1))
oddRow='.'.join(['|']*(cn+1))
artrn = rn*2+1
print(f'Case #{ti+1}:')
for ri in range(artrn):
defaultRow = evenRow if ri%2==0 else oddRow
if ri//2==0:
print('..'+defaultRow[2:])
else:
print(defaultRow)
|
normal
|
{
"blob_id": "1972e3733918da654cd156a500432a35a239aed4",
"index": 1841,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor ti in range(tn):\n rn, cn = [int(x) for x in input().split()]\n evenRow = '-'.join(['+'] * (cn + 1))\n oddRow = '.'.join(['|'] * (cn + 1))\n artrn = rn * 2 + 1\n print(f'Case #{ti + 1}:')\n for ri in range(artrn):\n defaultRow = evenRow if ri % 2 == 0 else oddRow\n if ri // 2 == 0:\n print('..' + defaultRow[2:])\n else:\n print(defaultRow)\n",
"step-3": "tn = int(input())\nfor ti in range(tn):\n rn, cn = [int(x) for x in input().split()]\n evenRow = '-'.join(['+'] * (cn + 1))\n oddRow = '.'.join(['|'] * (cn + 1))\n artrn = rn * 2 + 1\n print(f'Case #{ti + 1}:')\n for ri in range(artrn):\n defaultRow = evenRow if ri % 2 == 0 else oddRow\n if ri // 2 == 0:\n print('..' + defaultRow[2:])\n else:\n print(defaultRow)\n",
"step-4": "tn=int(input())\nfor ti in range(tn):\n #ans = work()\n rn,cn = [int(x) for x in input().split()]\n evenRow='-'.join(['+']*(cn+1))\n oddRow='.'.join(['|']*(cn+1))\n artrn = rn*2+1\n print(f'Case #{ti+1}:')\n for ri in range(artrn):\n defaultRow = evenRow if ri%2==0 else oddRow\n if ri//2==0:\n print('..'+defaultRow[2:])\n else:\n print(defaultRow)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
import datetime
from yahoo_finance import Share
def getYahooStock(ticker, date1, date2):
companyData = Share(ticker)
dataList = companyData.get_historical(date1, date2)
endData = dataList[0];
startData = dataList[len(dataList) - 1];
print ticker, float(startData['Open']), float(endData['Open'])
return ticker, float(startData['Open']), float(endData['Open'])
def stockDrop(ticker, date1):
currentDate = datetime.datetime.now()
formattedDate = (str(currentDate.year) + '-' + str(currentDate.month) + '-' + str(currentDate.day))
companyData = Share(ticker)
dataList = companyData.get_historical(date1, formattedDate);
originalStock = float(dataList[len(dataList) - 1]['Open']);
nextLower = 0
days = -1
for index, i in enumerate(reversed(dataList)):
nextLower = i['Open']
if float(nextLower) < float(originalStock):
days = len(dataList) - index
break
print days, originalStock, nextLower
return days, originalStock, nextLower
#def stockRange(ticker, date, range):
# dateRange = datetime.datetime()
def buyStock(ticker, buyDate, sellDate, amount):
data = getYahooStock(ticker, buyDate, sellDate)
print (amount * data[2])/data[1]
return (amount * data[2])/data[1]
start_date = datetime.datetime(2017, 4, 7, 0)
end_date = datetime.datetime(2017, 4, 14, 0)
d = start_date
delta = datetime.timedelta(hours=1)
print delta
companyData = Share('UAL')
dataList = companyData.get_historical(date1, date2)
while d <= end_date:
print getYahooStock
print d.strftime("%Y-%m-%d %H")
d += delta
stockDrop("BP", '2016-03-29')
getYahooStock("WFC", '2016-03-29', '2017-03-29')
buyStock("WFC", '2016-03-29', '2017-03-29', 1)
|
normal
|
{
"blob_id": "07854dc9e0a863834b8e671d29d5f407cdd1c13e",
"index": 9599,
"step-1": "import requests\nimport datetime\nfrom yahoo_finance import Share\n\ndef getYahooStock(ticker, date1, date2):\n companyData = Share(ticker)\n dataList = companyData.get_historical(date1, date2)\n endData = dataList[0];\n startData = dataList[len(dataList) - 1];\n print ticker, float(startData['Open']), float(endData['Open'])\n return ticker, float(startData['Open']), float(endData['Open'])\n\ndef stockDrop(ticker, date1):\n currentDate = datetime.datetime.now()\n formattedDate = (str(currentDate.year) + '-' + str(currentDate.month) + '-' + str(currentDate.day))\n companyData = Share(ticker)\n dataList = companyData.get_historical(date1, formattedDate);\n originalStock = float(dataList[len(dataList) - 1]['Open']);\n nextLower = 0\n days = -1\n for index, i in enumerate(reversed(dataList)):\n nextLower = i['Open']\n if float(nextLower) < float(originalStock):\n days = len(dataList) - index\n break\n print days, originalStock, nextLower\n return days, originalStock, nextLower\n\n#def stockRange(ticker, date, range):\n # dateRange = datetime.datetime()\n\ndef buyStock(ticker, buyDate, sellDate, amount):\n data = getYahooStock(ticker, buyDate, sellDate)\n print (amount * data[2])/data[1]\n return (amount * data[2])/data[1]\n\nstart_date = datetime.datetime(2017, 4, 7, 0)\nend_date = datetime.datetime(2017, 4, 14, 0)\n\nd = start_date\ndelta = datetime.timedelta(hours=1)\nprint delta\ncompanyData = Share('UAL')\ndataList = companyData.get_historical(date1, date2)\nwhile d <= end_date:\n print getYahooStock\n print d.strftime(\"%Y-%m-%d %H\")\n d += delta\n\nstockDrop(\"BP\", '2016-03-29')\ngetYahooStock(\"WFC\", '2016-03-29', '2017-03-29')\nbuyStock(\"WFC\", '2016-03-29', '2017-03-29', 1)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for char in word:
if count == 0:
print(char.upper(), end='')
count = 1
else:
print(char.lower(), end='')
count = 0
<|reserved_special_token_1|>
<|reserved_special_token_0|>
word = str(input('please enter the word\n'))
count = 0
for char in word:
if count == 0:
print(char.upper(), end='')
count = 1
else:
print(char.lower(), end='')
count = 0
<|reserved_special_token_1|>
"""Write a program that asks the user to enter a word and then
capitalizes every other letter of that word. So if the user enters "rhinoceros",
the program should print "rHiNoCeRoS"""
word=str(input("please enter the word\n"))
count=0
for char in word:
if count==0:
print(char.upper(),end="")
count=1
else:
print(char.lower(),end="")
count=0
|
flexible
|
{
"blob_id": "bc837d95ef22bd376f8b095e7aeb1f7d15c0e22e",
"index": 941,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor char in word:\n if count == 0:\n print(char.upper(), end='')\n count = 1\n else:\n print(char.lower(), end='')\n count = 0\n",
"step-3": "<mask token>\nword = str(input('please enter the word\\n'))\ncount = 0\nfor char in word:\n if count == 0:\n print(char.upper(), end='')\n count = 1\n else:\n print(char.lower(), end='')\n count = 0\n",
"step-4": "\"\"\"Write a program that asks the user to enter a word and then\ncapitalizes every other letter of that word. So if the user enters \"rhinoceros\",\nthe program should print \"rHiNoCeRoS\"\"\"\n\nword=str(input(\"please enter the word\\n\"))\ncount=0\nfor char in word:\n if count==0:\n print(char.upper(),end=\"\")\n count=1\n else:\n print(char.lower(),end=\"\")\n count=0\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pickle
if __name__ == '__main__':
with open('id_generator.bin', 'rb') as f:
print(pickle.load(f))
|
normal
|
{
"blob_id": "080110e404cf5edfe53622a5942b53f9188ddd76",
"index": 1854,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n with open('id_generator.bin', 'rb') as f:\n print(pickle.load(f))\n",
"step-3": "import pickle\nif __name__ == '__main__':\n with open('id_generator.bin', 'rb') as f:\n print(pickle.load(f))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
h = int(input())
a = int(input())
b = int(input())
c = (h - b + a - b - 1) // (a - b)
print(int(c))
|
normal
|
{
"blob_id": "eea962d6c519bee802c346fcf8d0c7410e00c30b",
"index": 9587,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(int(c))\n",
"step-3": "h = int(input())\na = int(input())\nb = int(input())\nc = (h - b + a - b - 1) // (a - b)\nprint(int(c))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class gramWishbone(Peripheral, Elaboratable):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class gramWishbone(Peripheral, Elaboratable):
def __init__(self, core, data_width=32, granularity=8):
super().__init__(name='wishbone')
self.native_port = core.crossbar.get_native_port()
self.ratio = self.native_port.data_width // data_width
addr_width = log2_int(core.size // (self.native_port.data_width //
data_width))
self.bus = wishbone.Interface(addr_width=addr_width + log2_int(self
.ratio), data_width=data_width, granularity=granularity)
map = MemoryMap(addr_width=addr_width + log2_int(self.ratio) +
log2_int(data_width // granularity), data_width=granularity)
self.bus.memory_map = map
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class gramWishbone(Peripheral, Elaboratable):
def __init__(self, core, data_width=32, granularity=8):
super().__init__(name='wishbone')
self.native_port = core.crossbar.get_native_port()
self.ratio = self.native_port.data_width // data_width
addr_width = log2_int(core.size // (self.native_port.data_width //
data_width))
self.bus = wishbone.Interface(addr_width=addr_width + log2_int(self
.ratio), data_width=data_width, granularity=granularity)
map = MemoryMap(addr_width=addr_width + log2_int(self.ratio) +
log2_int(data_width // granularity), data_width=granularity)
self.bus.memory_map = map
def elaborate(self, platform):
m = Module()
m.d.comb += [self.native_port.wdata.valid.eq(self.bus.cyc & self.
bus.stb & self.bus.we)]
ratio_bitmask = Repl(1, log2_int(self.ratio))
sel = Signal.like(self.bus.sel)
with m.If(self.bus.sel == 0):
m.d.comb += sel.eq(Repl(1, sel.width))
with m.Else():
m.d.comb += sel.eq(self.bus.sel)
with m.Switch(self.bus.adr & ratio_bitmask):
for i in range(self.ratio):
with m.Case(i):
m.d.comb += self.native_port.wdata.we.eq(Repl(sel, self
.bus.granularity // 8) << self.ratio * i)
with m.Switch(self.bus.adr & ratio_bitmask):
for i in range(self.ratio):
with m.Case(i):
m.d.comb += self.native_port.wdata.data.eq(self.bus.
dat_w << self.bus.data_width * i)
m.d.comb += [self.native_port.rdata.ready.eq(1)]
with m.Switch(self.bus.adr & ratio_bitmask):
for i in range(self.ratio):
with m.Case(i):
m.d.comb += self.bus.dat_r.eq(self.native_port.rdata.
data >> self.bus.data_width * i)
with m.FSM():
with m.State('Send-Cmd'):
m.d.comb += [self.native_port.cmd.valid.eq(self.bus.cyc &
self.bus.stb), self.native_port.cmd.we.eq(self.bus.we),
self.native_port.cmd.addr.eq(self.bus.adr >> log2_int(
self.bus.data_width // self.bus.granularity))]
with m.If(self.native_port.cmd.valid & self.native_port.cmd
.ready):
with m.If(self.bus.we):
m.next = 'Wait-Write'
with m.Else():
m.next = 'Wait-Read'
with m.State('Wait-Read'):
with m.If(self.native_port.rdata.valid):
m.d.comb += self.bus.ack.eq(1)
m.next = 'Send-Cmd'
with m.State('Wait-Write'):
with m.If(self.native_port.wdata.ready):
m.d.comb += self.bus.ack.eq(1)
m.next = 'Send-Cmd'
return m
<|reserved_special_token_1|>
from math import log2
from nmigen import *
from nmigen.utils import log2_int
from nmigen_soc import wishbone
from nmigen_soc.memory import MemoryMap
from lambdasoc.periph import Peripheral
class gramWishbone(Peripheral, Elaboratable):
def __init__(self, core, data_width=32, granularity=8):
super().__init__(name='wishbone')
self.native_port = core.crossbar.get_native_port()
self.ratio = self.native_port.data_width // data_width
addr_width = log2_int(core.size // (self.native_port.data_width //
data_width))
self.bus = wishbone.Interface(addr_width=addr_width + log2_int(self
.ratio), data_width=data_width, granularity=granularity)
map = MemoryMap(addr_width=addr_width + log2_int(self.ratio) +
log2_int(data_width // granularity), data_width=granularity)
self.bus.memory_map = map
def elaborate(self, platform):
m = Module()
m.d.comb += [self.native_port.wdata.valid.eq(self.bus.cyc & self.
bus.stb & self.bus.we)]
ratio_bitmask = Repl(1, log2_int(self.ratio))
sel = Signal.like(self.bus.sel)
with m.If(self.bus.sel == 0):
m.d.comb += sel.eq(Repl(1, sel.width))
with m.Else():
m.d.comb += sel.eq(self.bus.sel)
with m.Switch(self.bus.adr & ratio_bitmask):
for i in range(self.ratio):
with m.Case(i):
m.d.comb += self.native_port.wdata.we.eq(Repl(sel, self
.bus.granularity // 8) << self.ratio * i)
with m.Switch(self.bus.adr & ratio_bitmask):
for i in range(self.ratio):
with m.Case(i):
m.d.comb += self.native_port.wdata.data.eq(self.bus.
dat_w << self.bus.data_width * i)
m.d.comb += [self.native_port.rdata.ready.eq(1)]
with m.Switch(self.bus.adr & ratio_bitmask):
for i in range(self.ratio):
with m.Case(i):
m.d.comb += self.bus.dat_r.eq(self.native_port.rdata.
data >> self.bus.data_width * i)
with m.FSM():
with m.State('Send-Cmd'):
m.d.comb += [self.native_port.cmd.valid.eq(self.bus.cyc &
self.bus.stb), self.native_port.cmd.we.eq(self.bus.we),
self.native_port.cmd.addr.eq(self.bus.adr >> log2_int(
self.bus.data_width // self.bus.granularity))]
with m.If(self.native_port.cmd.valid & self.native_port.cmd
.ready):
with m.If(self.bus.we):
m.next = 'Wait-Write'
with m.Else():
m.next = 'Wait-Read'
with m.State('Wait-Read'):
with m.If(self.native_port.rdata.valid):
m.d.comb += self.bus.ack.eq(1)
m.next = 'Send-Cmd'
with m.State('Wait-Write'):
with m.If(self.native_port.wdata.ready):
m.d.comb += self.bus.ack.eq(1)
m.next = 'Send-Cmd'
return m
<|reserved_special_token_1|>
# This file is Copyright (c) 2020 LambdaConcept <contact@lambdaconcept.com>
# License: BSD
from math import log2
from nmigen import *
from nmigen.utils import log2_int
from nmigen_soc import wishbone
from nmigen_soc.memory import MemoryMap
from lambdasoc.periph import Peripheral
class gramWishbone(Peripheral, Elaboratable):
def __init__(self, core, data_width=32, granularity=8):
super().__init__(name="wishbone")
self.native_port = core.crossbar.get_native_port()
self.ratio = self.native_port.data_width//data_width
addr_width = log2_int(core.size//(self.native_port.data_width//data_width))
self.bus = wishbone.Interface(addr_width=addr_width+log2_int(self.ratio),
data_width=data_width, granularity=granularity)
map = MemoryMap(addr_width=addr_width+log2_int(self.ratio)+log2_int(data_width//granularity),
data_width=granularity)
self.bus.memory_map = map
def elaborate(self, platform):
m = Module()
# Write datapath
m.d.comb += [
self.native_port.wdata.valid.eq(self.bus.cyc & self.bus.stb & self.bus.we),
]
ratio_bitmask = Repl(1, log2_int(self.ratio))
sel = Signal.like(self.bus.sel)
with m.If(self.bus.sel == 0):
m.d.comb += sel.eq(Repl(1, sel.width))
with m.Else():
m.d.comb += sel.eq(self.bus.sel)
with m.Switch(self.bus.adr & ratio_bitmask):
for i in range(self.ratio):
with m.Case(i):
m.d.comb += self.native_port.wdata.we.eq(Repl(sel, self.bus.granularity//8) << (self.ratio*i))
with m.Switch(self.bus.adr & ratio_bitmask):
for i in range(self.ratio):
with m.Case(i):
m.d.comb += self.native_port.wdata.data.eq(self.bus.dat_w << (self.bus.data_width*i))
# Read datapath
m.d.comb += [
self.native_port.rdata.ready.eq(1),
]
with m.Switch(self.bus.adr & ratio_bitmask):
for i in range(self.ratio):
with m.Case(i):
m.d.comb += self.bus.dat_r.eq(self.native_port.rdata.data >> (self.bus.data_width*i))
with m.FSM():
with m.State("Send-Cmd"):
m.d.comb += [
self.native_port.cmd.valid.eq(self.bus.cyc & self.bus.stb),
self.native_port.cmd.we.eq(self.bus.we),
self.native_port.cmd.addr.eq(self.bus.adr >> log2_int(self.bus.data_width//self.bus.granularity)),
]
with m.If(self.native_port.cmd.valid & self.native_port.cmd.ready):
with m.If(self.bus.we):
m.next = "Wait-Write"
with m.Else():
m.next = "Wait-Read"
with m.State("Wait-Read"):
with m.If(self.native_port.rdata.valid):
m.d.comb += self.bus.ack.eq(1)
m.next = "Send-Cmd"
with m.State("Wait-Write"):
with m.If(self.native_port.wdata.ready):
m.d.comb += self.bus.ack.eq(1)
m.next = "Send-Cmd"
return m
|
flexible
|
{
"blob_id": "3775ba538d6fab13e35e2f0761a1cacbe087f339",
"index": 4723,
"step-1": "<mask token>\n\n\nclass gramWishbone(Peripheral, Elaboratable):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass gramWishbone(Peripheral, Elaboratable):\n\n def __init__(self, core, data_width=32, granularity=8):\n super().__init__(name='wishbone')\n self.native_port = core.crossbar.get_native_port()\n self.ratio = self.native_port.data_width // data_width\n addr_width = log2_int(core.size // (self.native_port.data_width //\n data_width))\n self.bus = wishbone.Interface(addr_width=addr_width + log2_int(self\n .ratio), data_width=data_width, granularity=granularity)\n map = MemoryMap(addr_width=addr_width + log2_int(self.ratio) +\n log2_int(data_width // granularity), data_width=granularity)\n self.bus.memory_map = map\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass gramWishbone(Peripheral, Elaboratable):\n\n def __init__(self, core, data_width=32, granularity=8):\n super().__init__(name='wishbone')\n self.native_port = core.crossbar.get_native_port()\n self.ratio = self.native_port.data_width // data_width\n addr_width = log2_int(core.size // (self.native_port.data_width //\n data_width))\n self.bus = wishbone.Interface(addr_width=addr_width + log2_int(self\n .ratio), data_width=data_width, granularity=granularity)\n map = MemoryMap(addr_width=addr_width + log2_int(self.ratio) +\n log2_int(data_width // granularity), data_width=granularity)\n self.bus.memory_map = map\n\n def elaborate(self, platform):\n m = Module()\n m.d.comb += [self.native_port.wdata.valid.eq(self.bus.cyc & self.\n bus.stb & self.bus.we)]\n ratio_bitmask = Repl(1, log2_int(self.ratio))\n sel = Signal.like(self.bus.sel)\n with m.If(self.bus.sel == 0):\n m.d.comb += sel.eq(Repl(1, sel.width))\n with m.Else():\n m.d.comb += sel.eq(self.bus.sel)\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.we.eq(Repl(sel, self\n .bus.granularity // 8) << self.ratio * i)\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.data.eq(self.bus.\n dat_w << self.bus.data_width * i)\n m.d.comb += [self.native_port.rdata.ready.eq(1)]\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.bus.dat_r.eq(self.native_port.rdata.\n data >> self.bus.data_width * i)\n with m.FSM():\n with m.State('Send-Cmd'):\n m.d.comb += [self.native_port.cmd.valid.eq(self.bus.cyc &\n self.bus.stb), self.native_port.cmd.we.eq(self.bus.we),\n self.native_port.cmd.addr.eq(self.bus.adr >> log2_int(\n self.bus.data_width // self.bus.granularity))]\n with m.If(self.native_port.cmd.valid & self.native_port.cmd\n .ready):\n with m.If(self.bus.we):\n m.next = 
'Wait-Write'\n with m.Else():\n m.next = 'Wait-Read'\n with m.State('Wait-Read'):\n with m.If(self.native_port.rdata.valid):\n m.d.comb += self.bus.ack.eq(1)\n m.next = 'Send-Cmd'\n with m.State('Wait-Write'):\n with m.If(self.native_port.wdata.ready):\n m.d.comb += self.bus.ack.eq(1)\n m.next = 'Send-Cmd'\n return m\n",
"step-4": "from math import log2\nfrom nmigen import *\nfrom nmigen.utils import log2_int\nfrom nmigen_soc import wishbone\nfrom nmigen_soc.memory import MemoryMap\nfrom lambdasoc.periph import Peripheral\n\n\nclass gramWishbone(Peripheral, Elaboratable):\n\n def __init__(self, core, data_width=32, granularity=8):\n super().__init__(name='wishbone')\n self.native_port = core.crossbar.get_native_port()\n self.ratio = self.native_port.data_width // data_width\n addr_width = log2_int(core.size // (self.native_port.data_width //\n data_width))\n self.bus = wishbone.Interface(addr_width=addr_width + log2_int(self\n .ratio), data_width=data_width, granularity=granularity)\n map = MemoryMap(addr_width=addr_width + log2_int(self.ratio) +\n log2_int(data_width // granularity), data_width=granularity)\n self.bus.memory_map = map\n\n def elaborate(self, platform):\n m = Module()\n m.d.comb += [self.native_port.wdata.valid.eq(self.bus.cyc & self.\n bus.stb & self.bus.we)]\n ratio_bitmask = Repl(1, log2_int(self.ratio))\n sel = Signal.like(self.bus.sel)\n with m.If(self.bus.sel == 0):\n m.d.comb += sel.eq(Repl(1, sel.width))\n with m.Else():\n m.d.comb += sel.eq(self.bus.sel)\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.we.eq(Repl(sel, self\n .bus.granularity // 8) << self.ratio * i)\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.data.eq(self.bus.\n dat_w << self.bus.data_width * i)\n m.d.comb += [self.native_port.rdata.ready.eq(1)]\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.bus.dat_r.eq(self.native_port.rdata.\n data >> self.bus.data_width * i)\n with m.FSM():\n with m.State('Send-Cmd'):\n m.d.comb += [self.native_port.cmd.valid.eq(self.bus.cyc &\n self.bus.stb), self.native_port.cmd.we.eq(self.bus.we),\n 
self.native_port.cmd.addr.eq(self.bus.adr >> log2_int(\n self.bus.data_width // self.bus.granularity))]\n with m.If(self.native_port.cmd.valid & self.native_port.cmd\n .ready):\n with m.If(self.bus.we):\n m.next = 'Wait-Write'\n with m.Else():\n m.next = 'Wait-Read'\n with m.State('Wait-Read'):\n with m.If(self.native_port.rdata.valid):\n m.d.comb += self.bus.ack.eq(1)\n m.next = 'Send-Cmd'\n with m.State('Wait-Write'):\n with m.If(self.native_port.wdata.ready):\n m.d.comb += self.bus.ack.eq(1)\n m.next = 'Send-Cmd'\n return m\n",
"step-5": "# This file is Copyright (c) 2020 LambdaConcept <contact@lambdaconcept.com>\n# License: BSD\n\nfrom math import log2\n\nfrom nmigen import *\nfrom nmigen.utils import log2_int\n\nfrom nmigen_soc import wishbone\nfrom nmigen_soc.memory import MemoryMap\nfrom lambdasoc.periph import Peripheral\n\n\nclass gramWishbone(Peripheral, Elaboratable):\n def __init__(self, core, data_width=32, granularity=8):\n super().__init__(name=\"wishbone\")\n\n self.native_port = core.crossbar.get_native_port()\n\n self.ratio = self.native_port.data_width//data_width\n\n addr_width = log2_int(core.size//(self.native_port.data_width//data_width))\n self.bus = wishbone.Interface(addr_width=addr_width+log2_int(self.ratio),\n data_width=data_width, granularity=granularity)\n\n map = MemoryMap(addr_width=addr_width+log2_int(self.ratio)+log2_int(data_width//granularity),\n data_width=granularity)\n self.bus.memory_map = map\n\n def elaborate(self, platform):\n m = Module()\n\n # Write datapath\n m.d.comb += [\n self.native_port.wdata.valid.eq(self.bus.cyc & self.bus.stb & self.bus.we),\n ]\n\n ratio_bitmask = Repl(1, log2_int(self.ratio))\n\n sel = Signal.like(self.bus.sel)\n with m.If(self.bus.sel == 0):\n m.d.comb += sel.eq(Repl(1, sel.width))\n with m.Else():\n m.d.comb += sel.eq(self.bus.sel)\n\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.we.eq(Repl(sel, self.bus.granularity//8) << (self.ratio*i))\n\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.data.eq(self.bus.dat_w << (self.bus.data_width*i))\n\n # Read datapath\n m.d.comb += [\n self.native_port.rdata.ready.eq(1),\n ]\n\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.bus.dat_r.eq(self.native_port.rdata.data >> (self.bus.data_width*i))\n\n with m.FSM():\n with m.State(\"Send-Cmd\"):\n 
m.d.comb += [\n self.native_port.cmd.valid.eq(self.bus.cyc & self.bus.stb),\n self.native_port.cmd.we.eq(self.bus.we),\n self.native_port.cmd.addr.eq(self.bus.adr >> log2_int(self.bus.data_width//self.bus.granularity)),\n ]\n\n with m.If(self.native_port.cmd.valid & self.native_port.cmd.ready):\n with m.If(self.bus.we):\n m.next = \"Wait-Write\"\n with m.Else():\n m.next = \"Wait-Read\"\n\n with m.State(\"Wait-Read\"):\n with m.If(self.native_port.rdata.valid):\n m.d.comb += self.bus.ack.eq(1)\n m.next = \"Send-Cmd\"\n\n with m.State(\"Wait-Write\"):\n with m.If(self.native_port.wdata.ready):\n m.d.comb += self.bus.ack.eq(1)\n m.next = \"Send-Cmd\"\n\n return m\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from flask import Flask
from flask import request, redirect, render_template
from flask_bootstrap import Bootstrap
import urllib.request
import urllib.parse
import json
import uuid
import yaml
import hashlib
from Crypto import Random
from Crypto.Cipher import AES
import base64
app = Flask(__name__)
Bootstrap(app)
with open("app_config.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
postapikey = cfg['app']['postapikey']
mainurl = cfg['app']['mainurl']
appurl = cfg['app']['appurl']
secretkey = cfg['app']['secret']
# Some crypto staff
BLOCK_SIZE = 16
def trans(key):
return hashlib.md5(key.encode("utf-8")).digest()
def encrypt(message, passphrase):
passphrase = trans(passphrase)
IV = Random.new().read(BLOCK_SIZE)
aes = AES.new(passphrase, AES.MODE_CFB, IV)
return base64.b32encode(IV + aes.encrypt(message)).decode("utf-8")
def decrypt(encrypted, passphrase):
passphrase = trans(passphrase)
encrypted = base64.b32decode(encrypted)
IV = encrypted[:BLOCK_SIZE]
aes = AES.new(passphrase, AES.MODE_CFB, IV)
return aes.decrypt(encrypted[BLOCK_SIZE:]).decode("utf-8")
def mokum_message(message):
try:
postdata = {"post": {"timelines": ["user"],
"text": message,
"comments_disabled": True,
"nsfw": False},
"_uuid": str(uuid.uuid4())
}
req = urllib.request.Request("https://mokum.place/api/v1/posts.json")
req.add_header('Content-Type', 'application/json')
req.add_header('Accept', 'application/json')
req.add_header('X-API-Token', postapikey)
resp = urllib.request.urlopen(req, json.dumps(postdata).encode("utf-8"))
message = json.loads(resp.read().decode("utf-8"))
if message['post']['id']:
return message['post']['id']
except:
return False
def mokum_comment(messageid, comment):
try:
posturl = "https://mokum.place/api/v1/posts/" + str(messageid) + "/comments.json"
postdata = {"comment": {"text": comment,
# "platform": "anonymous device"
},
"_uuid": str(uuid.uuid4())}
req = urllib.request.Request(posturl)
req.add_header('Content-Type', 'application/json')
req.add_header('Accept', 'application/json')
req.add_header('X-API-Token', postapikey)
resp = urllib.request.urlopen(req, json.dumps(postdata).encode("utf-8"))
message = json.loads(resp.read().decode("utf-8"))
if message['id']:
return message['id']
except:
return False
@app.route('/')
def main():
return render_template('post.html')
@app.route('/post', methods=['POST'])
def post():
posttext = request.form['post']
id = mokum_message(posttext)
mokum_comment(id, "click to comment --> " + appurl + "/c/" + encrypt(str(id), secretkey))
return redirect(mainurl + str(id))
@app.route('/c/<cid>')
def comm(cid):
return render_template('comment.html', cid=cid)
@app.route('/comment', methods=['POST'])
def commented():
postid = decrypt(request.form['cid'], secretkey)
posttext = request.form['comment']
mokum_comment(postid, posttext)
return redirect(mainurl + postid)
if __name__ == '__main__':
app.run(debug=True)
|
normal
|
{
"blob_id": "e55115a65ebee5d41dcd01a5cbabc328acf152da",
"index": 6079,
"step-1": "<mask token>\n\n\ndef encrypt(message, passphrase):\n passphrase = trans(passphrase)\n IV = Random.new().read(BLOCK_SIZE)\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')\n\n\ndef decrypt(encrypted, passphrase):\n passphrase = trans(passphrase)\n encrypted = base64.b32decode(encrypted)\n IV = encrypted[:BLOCK_SIZE]\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')\n\n\ndef mokum_message(message):\n try:\n postdata = {'post': {'timelines': ['user'], 'text': message,\n 'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.\n uuid4())}\n req = urllib.request.Request('https://mokum.place/api/v1/posts.json')\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['post']['id']:\n return message['post']['id']\n except:\n return False\n\n\n<mask token>\n\n\n@app.route('/')\ndef main():\n return render_template('post.html')\n\n\n@app.route('/post', methods=['POST'])\ndef post():\n posttext = request.form['post']\n id = mokum_message(posttext)\n mokum_comment(id, 'click to comment --> ' + appurl + '/c/' + encrypt(\n str(id), secretkey))\n return redirect(mainurl + str(id))\n\n\n@app.route('/c/<cid>')\ndef comm(cid):\n return render_template('comment.html', cid=cid)\n\n\n@app.route('/comment', methods=['POST'])\ndef commented():\n postid = decrypt(request.form['cid'], secretkey)\n posttext = request.form['comment']\n mokum_comment(postid, posttext)\n return redirect(mainurl + postid)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef trans(key):\n return hashlib.md5(key.encode('utf-8')).digest()\n\n\ndef encrypt(message, passphrase):\n passphrase = trans(passphrase)\n IV = Random.new().read(BLOCK_SIZE)\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')\n\n\ndef decrypt(encrypted, passphrase):\n passphrase = trans(passphrase)\n encrypted = base64.b32decode(encrypted)\n IV = encrypted[:BLOCK_SIZE]\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')\n\n\ndef mokum_message(message):\n try:\n postdata = {'post': {'timelines': ['user'], 'text': message,\n 'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.\n uuid4())}\n req = urllib.request.Request('https://mokum.place/api/v1/posts.json')\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['post']['id']:\n return message['post']['id']\n except:\n return False\n\n\ndef mokum_comment(messageid, comment):\n try:\n posturl = 'https://mokum.place/api/v1/posts/' + str(messageid\n ) + '/comments.json'\n postdata = {'comment': {'text': comment}, '_uuid': str(uuid.uuid4())}\n req = urllib.request.Request(posturl)\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['id']:\n return message['id']\n except:\n return False\n\n\n@app.route('/')\ndef main():\n return render_template('post.html')\n\n\n@app.route('/post', methods=['POST'])\ndef post():\n posttext = request.form['post']\n id = mokum_message(posttext)\n mokum_comment(id, 'click to 
comment --> ' + appurl + '/c/' + encrypt(\n str(id), secretkey))\n return redirect(mainurl + str(id))\n\n\n@app.route('/c/<cid>')\ndef comm(cid):\n return render_template('comment.html', cid=cid)\n\n\n@app.route('/comment', methods=['POST'])\ndef commented():\n postid = decrypt(request.form['cid'], secretkey)\n posttext = request.form['comment']\n mokum_comment(postid, posttext)\n return redirect(mainurl + postid)\n\n\n<mask token>\n",
"step-3": "<mask token>\nBootstrap(app)\nwith open('app_config.yml', 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n<mask token>\n\n\ndef trans(key):\n return hashlib.md5(key.encode('utf-8')).digest()\n\n\ndef encrypt(message, passphrase):\n passphrase = trans(passphrase)\n IV = Random.new().read(BLOCK_SIZE)\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')\n\n\ndef decrypt(encrypted, passphrase):\n passphrase = trans(passphrase)\n encrypted = base64.b32decode(encrypted)\n IV = encrypted[:BLOCK_SIZE]\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')\n\n\ndef mokum_message(message):\n try:\n postdata = {'post': {'timelines': ['user'], 'text': message,\n 'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.\n uuid4())}\n req = urllib.request.Request('https://mokum.place/api/v1/posts.json')\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['post']['id']:\n return message['post']['id']\n except:\n return False\n\n\ndef mokum_comment(messageid, comment):\n try:\n posturl = 'https://mokum.place/api/v1/posts/' + str(messageid\n ) + '/comments.json'\n postdata = {'comment': {'text': comment}, '_uuid': str(uuid.uuid4())}\n req = urllib.request.Request(posturl)\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['id']:\n return message['id']\n except:\n return False\n\n\n@app.route('/')\ndef main():\n return render_template('post.html')\n\n\n@app.route('/post', methods=['POST'])\ndef 
post():\n posttext = request.form['post']\n id = mokum_message(posttext)\n mokum_comment(id, 'click to comment --> ' + appurl + '/c/' + encrypt(\n str(id), secretkey))\n return redirect(mainurl + str(id))\n\n\n@app.route('/c/<cid>')\ndef comm(cid):\n return render_template('comment.html', cid=cid)\n\n\n@app.route('/comment', methods=['POST'])\ndef commented():\n postid = decrypt(request.form['cid'], secretkey)\n posttext = request.form['comment']\n mokum_comment(postid, posttext)\n return redirect(mainurl + postid)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask\nfrom flask import request, redirect, render_template\nfrom flask_bootstrap import Bootstrap\nimport urllib.request\nimport urllib.parse\nimport json\nimport uuid\nimport yaml\nimport hashlib\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nimport base64\napp = Flask(__name__)\nBootstrap(app)\nwith open('app_config.yml', 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\npostapikey = cfg['app']['postapikey']\nmainurl = cfg['app']['mainurl']\nappurl = cfg['app']['appurl']\nsecretkey = cfg['app']['secret']\nBLOCK_SIZE = 16\n\n\ndef trans(key):\n return hashlib.md5(key.encode('utf-8')).digest()\n\n\ndef encrypt(message, passphrase):\n passphrase = trans(passphrase)\n IV = Random.new().read(BLOCK_SIZE)\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')\n\n\ndef decrypt(encrypted, passphrase):\n passphrase = trans(passphrase)\n encrypted = base64.b32decode(encrypted)\n IV = encrypted[:BLOCK_SIZE]\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')\n\n\ndef mokum_message(message):\n try:\n postdata = {'post': {'timelines': ['user'], 'text': message,\n 'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.\n uuid4())}\n req = urllib.request.Request('https://mokum.place/api/v1/posts.json')\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['post']['id']:\n return message['post']['id']\n except:\n return False\n\n\ndef mokum_comment(messageid, comment):\n try:\n posturl = 'https://mokum.place/api/v1/posts/' + str(messageid\n ) + '/comments.json'\n postdata = {'comment': {'text': comment}, '_uuid': str(uuid.uuid4())}\n req = urllib.request.Request(posturl)\n 
req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['id']:\n return message['id']\n except:\n return False\n\n\n@app.route('/')\ndef main():\n return render_template('post.html')\n\n\n@app.route('/post', methods=['POST'])\ndef post():\n posttext = request.form['post']\n id = mokum_message(posttext)\n mokum_comment(id, 'click to comment --> ' + appurl + '/c/' + encrypt(\n str(id), secretkey))\n return redirect(mainurl + str(id))\n\n\n@app.route('/c/<cid>')\ndef comm(cid):\n return render_template('comment.html', cid=cid)\n\n\n@app.route('/comment', methods=['POST'])\ndef commented():\n postid = decrypt(request.form['cid'], secretkey)\n posttext = request.form['comment']\n mokum_comment(postid, posttext)\n return redirect(mainurl + postid)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask\nfrom flask import request, redirect, render_template\nfrom flask_bootstrap import Bootstrap\nimport urllib.request\nimport urllib.parse\nimport json\nimport uuid\nimport yaml\nimport hashlib\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nimport base64\n\n\n\n\napp = Flask(__name__)\nBootstrap(app)\n\nwith open(\"app_config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n\npostapikey = cfg['app']['postapikey']\nmainurl = cfg['app']['mainurl']\nappurl = cfg['app']['appurl']\nsecretkey = cfg['app']['secret']\n\n# Some crypto staff\n\nBLOCK_SIZE = 16\n\n\n\ndef trans(key):\n return hashlib.md5(key.encode(\"utf-8\")).digest()\n\n\ndef encrypt(message, passphrase):\n passphrase = trans(passphrase)\n IV = Random.new().read(BLOCK_SIZE)\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return base64.b32encode(IV + aes.encrypt(message)).decode(\"utf-8\")\n\n\ndef decrypt(encrypted, passphrase):\n passphrase = trans(passphrase)\n encrypted = base64.b32decode(encrypted)\n IV = encrypted[:BLOCK_SIZE]\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return aes.decrypt(encrypted[BLOCK_SIZE:]).decode(\"utf-8\")\n\n\ndef mokum_message(message):\n try:\n postdata = {\"post\": {\"timelines\": [\"user\"],\n \"text\": message,\n \"comments_disabled\": True,\n \"nsfw\": False},\n \"_uuid\": str(uuid.uuid4())\n }\n\n req = urllib.request.Request(\"https://mokum.place/api/v1/posts.json\")\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode(\"utf-8\"))\n\n message = json.loads(resp.read().decode(\"utf-8\"))\n\n if message['post']['id']:\n return message['post']['id']\n except:\n return False\n\n\ndef mokum_comment(messageid, comment):\n try:\n posturl = \"https://mokum.place/api/v1/posts/\" + str(messageid) + \"/comments.json\"\n postdata = {\"comment\": {\"text\": comment,\n # 
\"platform\": \"anonymous device\"\n },\n \"_uuid\": str(uuid.uuid4())}\n\n req = urllib.request.Request(posturl)\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode(\"utf-8\"))\n\n message = json.loads(resp.read().decode(\"utf-8\"))\n\n if message['id']:\n return message['id']\n\n except:\n return False\n\n\n@app.route('/')\ndef main():\n return render_template('post.html')\n\n\n@app.route('/post', methods=['POST'])\ndef post():\n posttext = request.form['post']\n id = mokum_message(posttext)\n mokum_comment(id, \"click to comment --> \" + appurl + \"/c/\" + encrypt(str(id), secretkey))\n return redirect(mainurl + str(id))\n\n\n@app.route('/c/<cid>')\ndef comm(cid):\n return render_template('comment.html', cid=cid)\n\n\n@app.route('/comment', methods=['POST'])\ndef commented():\n postid = decrypt(request.form['cid'], secretkey)\n posttext = request.form['comment']\n mokum_comment(postid, posttext)\n return redirect(mainurl + postid)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-ids": [
7,
9,
10,
12,
13
]
}
|
[
7,
9,
10,
12,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
GERRIT_PORT = 29418
GERRIT_USERNAME = 'dci-ci-bot'
GERRIT_HOSTNAME = 'softwarefactory-project.io'
GERRIT_SSH_KEY_FILENAME = os.getenv('GERRIT_SSH_KEY_FILENAME',
'/home/dci/dci-ci-bot.id_rsa')
RHEL_AGENT_DIR = os.getenv('RHEL_AGENT_DIR', '/opt/dci-rhel-agent')
RHEL_DCI_CLIENT_ID = os.getenv('DCI_CLIENT_ID')
RHEL_DCI_API_SECRET = os.getenv('DCI_API_SECRET')
HOST_SSH_KEY_FILENAME = os.getenv('HOST_SSH_KEY_FILENAME',
'/home/dci/.ssh/id_rsa')
<|reserved_special_token_1|>
import os
GERRIT_PORT = 29418
GERRIT_USERNAME = 'dci-ci-bot'
GERRIT_HOSTNAME = 'softwarefactory-project.io'
GERRIT_SSH_KEY_FILENAME = os.getenv('GERRIT_SSH_KEY_FILENAME',
'/home/dci/dci-ci-bot.id_rsa')
RHEL_AGENT_DIR = os.getenv('RHEL_AGENT_DIR', '/opt/dci-rhel-agent')
RHEL_DCI_CLIENT_ID = os.getenv('DCI_CLIENT_ID')
RHEL_DCI_API_SECRET = os.getenv('DCI_API_SECRET')
HOST_SSH_KEY_FILENAME = os.getenv('HOST_SSH_KEY_FILENAME',
'/home/dci/.ssh/id_rsa')
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
#
# Copyright (C) Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
GERRIT_PORT = 29418
GERRIT_USERNAME = 'dci-ci-bot'
GERRIT_HOSTNAME = 'softwarefactory-project.io'
GERRIT_SSH_KEY_FILENAME = os.getenv('GERRIT_SSH_KEY_FILENAME',
'/home/dci/dci-ci-bot.id_rsa')
RHEL_AGENT_DIR = os.getenv('RHEL_AGENT_DIR', '/opt/dci-rhel-agent')
RHEL_DCI_CLIENT_ID = os.getenv('DCI_CLIENT_ID')
RHEL_DCI_API_SECRET = os.getenv('DCI_API_SECRET')
HOST_SSH_KEY_FILENAME = os.getenv('HOST_SSH_KEY_FILENAME', '/home/dci/.ssh/id_rsa')
|
flexible
|
{
"blob_id": "8410ff0806766a09d346e930123a2696bebb4b60",
"index": 2821,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nGERRIT_PORT = 29418\nGERRIT_USERNAME = 'dci-ci-bot'\nGERRIT_HOSTNAME = 'softwarefactory-project.io'\nGERRIT_SSH_KEY_FILENAME = os.getenv('GERRIT_SSH_KEY_FILENAME',\n '/home/dci/dci-ci-bot.id_rsa')\nRHEL_AGENT_DIR = os.getenv('RHEL_AGENT_DIR', '/opt/dci-rhel-agent')\nRHEL_DCI_CLIENT_ID = os.getenv('DCI_CLIENT_ID')\nRHEL_DCI_API_SECRET = os.getenv('DCI_API_SECRET')\nHOST_SSH_KEY_FILENAME = os.getenv('HOST_SSH_KEY_FILENAME',\n '/home/dci/.ssh/id_rsa')\n",
"step-3": "import os\nGERRIT_PORT = 29418\nGERRIT_USERNAME = 'dci-ci-bot'\nGERRIT_HOSTNAME = 'softwarefactory-project.io'\nGERRIT_SSH_KEY_FILENAME = os.getenv('GERRIT_SSH_KEY_FILENAME',\n '/home/dci/dci-ci-bot.id_rsa')\nRHEL_AGENT_DIR = os.getenv('RHEL_AGENT_DIR', '/opt/dci-rhel-agent')\nRHEL_DCI_CLIENT_ID = os.getenv('DCI_CLIENT_ID')\nRHEL_DCI_API_SECRET = os.getenv('DCI_API_SECRET')\nHOST_SSH_KEY_FILENAME = os.getenv('HOST_SSH_KEY_FILENAME',\n '/home/dci/.ssh/id_rsa')\n",
"step-4": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Red Hat, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\n\nGERRIT_PORT = 29418\nGERRIT_USERNAME = 'dci-ci-bot'\nGERRIT_HOSTNAME = 'softwarefactory-project.io'\nGERRIT_SSH_KEY_FILENAME = os.getenv('GERRIT_SSH_KEY_FILENAME',\n '/home/dci/dci-ci-bot.id_rsa')\nRHEL_AGENT_DIR = os.getenv('RHEL_AGENT_DIR', '/opt/dci-rhel-agent')\nRHEL_DCI_CLIENT_ID = os.getenv('DCI_CLIENT_ID')\nRHEL_DCI_API_SECRET = os.getenv('DCI_API_SECRET')\n\nHOST_SSH_KEY_FILENAME = os.getenv('HOST_SSH_KEY_FILENAME', '/home/dci/.ssh/id_rsa')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.