[ "\"\\\", \\\"\" + DeptName + \"\\\");\") cursor.execute(sql) finally: connection.close() def newBAssociation(): print(\"All of", "+ \"\\\", \\\"\" + City + \"\\\", \\\"\" + State + \"\\\", \\\"\"", "(userInput == 5): newBAssociation() elif (userInput == 6): return elif (userInput == 7):", "name of the representative: \") RLName = input(\"Last name of the representative: \")", "4): newCourses() elif (userInput == 5): newBAssociation() elif (userInput == 6): return elif", "Create a new book with inventory 2) Create a new university 3) Create", "\", \" + ISBN13 + \", \\\"\" + DPublished + \"\\\", \" +", "into Departments values(\\\"\" + UniversityName + \"\\\", \\\"\" + DeptName + \"\\\");\") cursor.execute(sql)", "+ \"\\\");\") cursor.execute(sql) finally: connection.close() def adminModuleMain(): welcomeMsg = (\"---------------------\\nAdmin Module\\n---------------------\") mainOptionsMsg =", "\", \\\"\" + Publisher + \"\\\", \" + Edition + \", \\\"\" +", "of the representative: \") RLName = input(\"Last name of the representative: \") Street", "import cgi def getConnection(): return pymysql.connect(host='localhost', user='root', password='<PASSWORD>', db='BookFetch') def newBook(): Title =", "= int(input(mainOptionsMsg)) print(\"\\n\") while(userInput < 1 or userInput > 7): print(invalidInputMsg) userInput =", "into CourseReq values(\" + ISBN + \", \\\"\" + CourseName + \"\\\", \\\"\"", "ISBN + \", \\\"\" + CourseName + \"\\\", \\\"\" + UniversityName + \"\\\");\")", "CourseName + \"\\\", \\\"\" + UniversityName + \"\\\");\") cursor.execute(sql) finally: connection.close() def adminModuleMain():", "+ \"\\\", \\\"\" + Country + \"\\\");\") cursor.execute(sql) finally: connection.close() def newDepartment(): UniversityName", "input(\"Enter the name of the university: \") RFName = input(\"First name of the", "print(\"All of these are foreign key constraints: \") CourseName = input(\"Enter the name", "input(\"First name of the representative: \") RLName = input(\"Last name of the representative:", "book with inventory 2) Create a new university 3) Create a new department", "new department 4) Create a new courses 5) Create a new book associations", "enter a valid input.\" print(welcomeMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") while(userInput < 1 or", "pymysql.connect(host='localhost', user='root', password='<PASSWORD>', db='BookFetch') def newBook(): Title = input(\"Enter the title of the", "Title = input(\"Enter the title of the new book: \") ISBN = input(\"Enter", "\\\"\" + State + \"\\\", \\\"\" + Country + \"\\\");\") cursor.execute(sql) finally: connection.close()", "\\\"\" + UniversityName + \"\\\", \\\"\" + DeptName + \"\\\");\") cursor.execute(sql) finally: connection.close()", "these are foreign key constraints: \") CourseName = input(\"Enter the name of the", "\\\"\" + Category + \"\\\", \\\"\" + Author + \"\\\");\") cursor.execute(sql) finally: connection.close()", "input.\" print(welcomeMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") while(userInput < 1 or userInput > 7):", "< 1 or userInput > 7): print(invalidInputMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") if (userInput", "+ ISBN13 + \", \\\"\" + DPublished + \"\\\", \" + Quantity +", "getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into Courses values(\\\"\"", "def adminModuleMain(): welcomeMsg = (\"---------------------\\nAdmin Module\\n---------------------\") mainOptionsMsg = ( \"\"\"Here are your options:", "into Courses 
values(\\\"\" + CourseName + \"\\\", \\\"\" + UniversityName + \"\\\", \\\"\"", "Title + \"\\\", \" + ISBN + \", \" + ISBN13 + \",", "= getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into BookDetails", "Courses values(\\\"\" + CourseName + \"\\\", \\\"\" + UniversityName + \"\\\", \\\"\" +", "= (\"insert into CourseReq values(\" + ISBN + \", \\\"\" + CourseName +", "try: with connection.cursor() as cursor: sql = (\"insert into CourseReq values(\" + ISBN", "[1-7]: \"\"\") invalidInputMsg = \"Invalid input, please enter a valid input.\" print(welcomeMsg) userInput", "Language + \"\\\", \\\"\" + Category + \"\\\", \\\"\" + Author + \"\\\");\")", "= (\"insert into Courses values(\\\"\" + CourseName + \"\\\", \\\"\" + UniversityName +", "Author + \"\\\");\") cursor.execute(sql) finally: connection.close() def newUniversity(): Name = input(\"Enter the name", "+ RFName + \"\\\", \\\"\" + RLName + \"\\\", \\\"\" + Street +", "the name of the university: \") ISBN = input(\"Enter the isbn of the", "+ \"\\\");\") cursor.execute(sql) finally: connection.close() def newCourses(): CourseName = input(\"Enter the name of", "try: with connection.cursor() as cursor: sql = (\"insert into Courses values(\\\"\" + CourseName", "7): print(invalidInputMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") if (userInput == 1): newBook() elif (userInput", "= input(\"First name of the representative: \") RLName = input(\"Last name of the", "+ \"\\\", \" + Edition + \", \\\"\" + Language + \"\\\", \\\"\"", "\") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert", "the name of the university: \") RFName = input(\"First name of the representative:", "print(invalidInputMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") if (userInput == 1): newBook() elif (userInput ==", "+ Title + \"\\\", \" + ISBN + \", \" + ISBN13 +", "name of the course: \") UniversityName = input(\"Enter the name of the university:", "City = input(\"City: \") State = input(\"State: \") Country = input(\"Country: \") connection", "\" + Quantity + \", \\\"\" + Publisher + \"\\\", \" + Edition", "+ ISBN + \", \\\"\" + CourseName + \"\\\", \\\"\" + UniversityName +", "+ Publisher + \"\\\", \" + Edition + \", \\\"\" + Language +", "Publisher + \"\\\", \" + Edition + \", \\\"\" + Language + \"\\\",", "values(\\\"\" + CourseName + \"\\\", \\\"\" + UniversityName + \"\\\", \\\"\" + DeptName", "\"\\\", \\\"\" + RFName + \"\\\", \\\"\" + RLName + \"\\\", \\\"\" +", "1): newBook() elif (userInput == 2): newUniversity() elif (userInput == 3): newDepartment() elif", "= getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into CourseReq", "+ UniversityName + \"\\\");\") cursor.execute(sql) finally: connection.close() def adminModuleMain(): welcomeMsg = (\"---------------------\\nAdmin Module\\n---------------------\")", "the department: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql", "cursor.execute(sql) finally: connection.close() def newBAssociation(): print(\"All of these are foreign key constraints: \")", "\"\\\", \" + Edition + \", \\\"\" + Language + \"\\\", \\\"\" +", "connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into Courses values(\\\"\" +", "+ \"\\\", \\\"\" + Category + \"\\\", \\\"\" + Author + \"\\\");\") cursor.execute(sql)", "values(\\\"\" + Name + \"\\\", \\\"\" + RFName + 
\"\\\", \\\"\" + RLName", "randint import datetime import pymysql import cgi def getConnection(): return pymysql.connect(host='localhost', user='root', password='<PASSWORD>',", "\\\"\" + RLName + \"\\\", \\\"\" + Street + \"\\\", \\\"\" + City", "connection.close() def newBAssociation(): print(\"All of these are foreign key constraints: \") CourseName =", "input(\"Enter the isbn of the book: \") connection = getConnection() connection.autocommit(True) try: with", "( \"\"\"Here are your options: 1) Create a new book with inventory 2)", "+ State + \"\\\", \\\"\" + Country + \"\\\");\") cursor.execute(sql) finally: connection.close() def", "connection.close() def newCourses(): CourseName = input(\"Enter the name of the course: \") UniversityName", "= input (\"Enter the isbn 13: \") DPublished = input(\"Enter date published: \")", "\") Quantity = input(\"Enter quantity: \") Publisher = input(\"Enter publisher: \") Edition =", "the university: \") DeptName = input(\"Enter the name of the department: \") connection", "> 7): print(invalidInputMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") if (userInput == 1): newBook() elif", "with connection.cursor() as cursor: sql = (\"insert into Courses values(\\\"\" + CourseName +", "= input(\"Enter the isbn of the new book: \") ISBN13 = input (\"Enter", "inventory 2) Create a new university 3) Create a new department 4) Create", "elif (userInput == 4): newCourses() elif (userInput == 5): newBAssociation() elif (userInput ==", "\\\"\" + CourseName + \"\\\", \\\"\" + UniversityName + \"\\\");\") cursor.execute(sql) finally: connection.close()", "newCourses(): CourseName = input(\"Enter the name of the course: \") UniversityName = input(\"Enter", "the course: \") UniversityName = input(\"Enter the name of the university: \") ISBN", "1) Create a new book with inventory 2) Create a new university 3)", "+ DeptName + \"\\\");\") cursor.execute(sql) finally: connection.close() def newCourses(): CourseName = input(\"Enter the", "title of the new book: \") ISBN = input(\"Enter the isbn of the", "+ \"\\\", \" + Quantity + \", \\\"\" + Publisher + \"\\\", \"", "= input(\"Author: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql", "the representative: \") RLName = input(\"Last name of the representative: \") Street =", "+ \", \\\"\" + CourseName + \"\\\", \\\"\" + UniversityName + \"\\\");\") cursor.execute(sql)", "== 2): newUniversity() elif (userInput == 3): newDepartment() elif (userInput == 4): newCourses()", "the isbn 13: \") DPublished = input(\"Enter date published: \") Quantity = input(\"Enter", "constraints: \") CourseName = input(\"Enter the name of the course: \") UniversityName =", "input(\"Enter the isbn of the new book: \") ISBN13 = input (\"Enter the", "a new department 4) Create a new courses 5) Create a new book", "db='BookFetch') def newBook(): Title = input(\"Enter the title of the new book: \")", "CourseName + \"\\\", \\\"\" + UniversityName + \"\\\", \\\"\" + DeptName + \"\\\");\")", "mainOptionsMsg = ( \"\"\"Here are your options: 1) Create a new book with", "\"\\\", \\\"\" + RLName + \"\\\", \\\"\" + Street + \"\\\", \\\"\" +", "DPublished = input(\"Enter date published: \") Quantity = input(\"Enter quantity: \") Publisher =", "CourseName = input(\"Enter the name of the course: \") UniversityName = input(\"Enter the", "Quantity + \", \\\"\" + Publisher + \"\\\", \" + Edition + \",", "new courses 5) Create a new book associations 6) Return 7) Quit Enter", "input(\"Enter the name of 
the course: \") UniversityName = input(\"Enter the name of", "new book: \") ISBN13 = input (\"Enter the isbn 13: \") DPublished =", "input(\"Language: \") Category = input(\"Category: \") Author = input(\"Author: \") connection = getConnection()", "+ Language + \"\\\", \\\"\" + Category + \"\\\", \\\"\" + Author +", "welcomeMsg = (\"---------------------\\nAdmin Module\\n---------------------\") mainOptionsMsg = ( \"\"\"Here are your options: 1) Create", "Quantity = input(\"Enter quantity: \") Publisher = input(\"Enter publisher: \") Edition = input(\"Edition:", "isbn 13: \") DPublished = input(\"Enter date published: \") Quantity = input(\"Enter quantity:", "+ \", \\\"\" + Language + \"\\\", \\\"\" + Category + \"\\\", \\\"\"", "\"\\\", \\\"\" + UniversityName + \"\\\");\") cursor.execute(sql) finally: connection.close() def adminModuleMain(): welcomeMsg =", "connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into BookDetails values (\\\"\"", "are your options: 1) Create a new book with inventory 2) Create a", "input(\"Street: \") City = input(\"City: \") State = input(\"State: \") Country = input(\"Country:", "input(\"Enter publisher: \") Edition = input(\"Edition: \") Language = input(\"Language: \") Category =", "key constraints: \") CourseName = input(\"Enter the name of the course: \") UniversityName", "a new university 3) Create a new department 4) Create a new courses", "Author = input(\"Author: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as cursor:", "= input(\"City: \") State = input(\"State: \") Country = input(\"Country: \") connection =", "= input(\"Enter quantity: \") Publisher = input(\"Enter publisher: \") Edition = input(\"Edition: \")", "adminModuleMain(): welcomeMsg = (\"---------------------\\nAdmin Module\\n---------------------\") mainOptionsMsg = ( \"\"\"Here are your options: 1)", "a valid input.\" print(welcomeMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") while(userInput < 1 or userInput", "\"\\\", \\\"\" + Author + \"\\\");\") cursor.execute(sql) finally: connection.close() def newUniversity(): Name =", "\") State = input(\"State: \") Country = input(\"Country: \") connection = getConnection() connection.autocommit(True)", "\\\"\" + Street + \"\\\", \\\"\" + City + \"\\\", \\\"\" + State", "\") UniversityName = input(\"Enter the name of the university: \") DeptName = input(\"Enter", "= ( \"\"\"Here are your options: 1) Create a new book with inventory", "+ \"\\\", \\\"\" + DeptName + \"\\\");\") cursor.execute(sql) finally: connection.close() def newCourses(): CourseName", "as cursor: sql = (\"insert into Departments values(\\\"\" + UniversityName + \"\\\", \\\"\"", "random import randint import datetime import pymysql import cgi def getConnection(): return pymysql.connect(host='localhost',", "\") RLName = input(\"Last name of the representative: \") Street = input(\"Street: \")", "Create a new book associations 6) Return 7) Quit Enter [1-7]: \"\"\") invalidInputMsg", "date published: \") Quantity = input(\"Enter quantity: \") Publisher = input(\"Enter publisher: \")", "elif (userInput == 5): newBAssociation() elif (userInput == 6): return elif (userInput ==", "book: \") ISBN13 = input (\"Enter the isbn 13: \") DPublished = input(\"Enter", "of the book: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as cursor:", "Country + \"\\\");\") cursor.execute(sql) finally: connection.close() def newDepartment(): UniversityName = 
input(\"Enter the name", "or userInput > 7): print(invalidInputMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") if (userInput == 1):", "with inventory 2) Create a new university 3) Create a new department 4)", "RLName = input(\"Last name of the representative: \") Street = input(\"Street: \") City", "UniversityName = input(\"Enter the name of the university: \") ISBN = input(\"Enter the", "name of the university: \") RFName = input(\"First name of the representative: \")", "userInput > 7): print(invalidInputMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") if (userInput == 1): newBook()", "connection.close() def newDepartment(): UniversityName = input(\"Enter the name of the university: \") DeptName", "finally: connection.close() def newBAssociation(): print(\"All of these are foreign key constraints: \") CourseName", "(\\\"\" + Title + \"\\\", \" + ISBN + \", \" + ISBN13", "courses 5) Create a new book associations 6) Return 7) Quit Enter [1-7]:", "input(\"City: \") State = input(\"State: \") Country = input(\"Country: \") connection = getConnection()", "+ Street + \"\\\", \\\"\" + City + \"\\\", \\\"\" + State +", "= input(\"State: \") Country = input(\"Country: \") connection = getConnection() connection.autocommit(True) try: with", "connection.cursor() as cursor: sql = (\"insert into Departments values(\\\"\" + UniversityName + \"\\\",", "connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into Universities values(\\\"\" +", "13: \") DPublished = input(\"Enter date published: \") Quantity = input(\"Enter quantity: \")", "cursor.execute(sql) finally: connection.close() def adminModuleMain(): welcomeMsg = (\"---------------------\\nAdmin Module\\n---------------------\") mainOptionsMsg = ( \"\"\"Here", "Create a new courses 5) Create a new book associations 6) Return 7)", "Enter [1-7]: \"\"\") invalidInputMsg = \"Invalid input, please enter a valid input.\" print(welcomeMsg)", "the name of the university: \") DeptName = input(\"Enter the name of the", "user='root', password='<PASSWORD>', db='BookFetch') def newBook(): Title = input(\"Enter the title of the new", "\") RFName = input(\"First name of the representative: \") RLName = input(\"Last name", "new university 3) Create a new department 4) Create a new courses 5)", "3): newDepartment() elif (userInput == 4): newCourses() elif (userInput == 5): newBAssociation() elif", "+ \"\\\");\") cursor.execute(sql) finally: connection.close() def newBAssociation(): print(\"All of these are foreign key", "finally: connection.close() def newDepartment(): UniversityName = input(\"Enter the name of the university: \")", "while(userInput < 1 or userInput > 7): print(invalidInputMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") if", "= getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into Departments", "sql = (\"insert into CourseReq values(\" + ISBN + \", \\\"\" + CourseName", "def getConnection(): return pymysql.connect(host='localhost', user='root', password='<PASSWORD>', db='BookFetch') def newBook(): Title = input(\"Enter the", "\"Invalid input, please enter a valid input.\" print(welcomeMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") while(userInput", "into BookDetails values (\\\"\" + Title + \"\\\", \" + ISBN + \",", "sql = (\"insert into BookDetails values (\\\"\" + Title + \"\\\", \" +", "new book: \") ISBN = input(\"Enter the isbn of the new book: \")", "book associations 6) Return 7) Quit Enter [1-7]: \"\"\") 
invalidInputMsg = \"Invalid input,", "+ UniversityName + \"\\\", \\\"\" + DeptName + \"\\\");\") cursor.execute(sql) finally: connection.close() def", "return pymysql.connect(host='localhost', user='root', password='<PASSWORD>', db='BookFetch') def newBook(): Title = input(\"Enter the title of", "cursor.execute(sql) finally: connection.close() def newDepartment(): UniversityName = input(\"Enter the name of the university:", "with connection.cursor() as cursor: sql = (\"insert into CourseReq values(\" + ISBN +", "= input(\"Country: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql", "= input(\"Enter the name of the university: \") DeptName = input(\"Enter the name", "UniversityName + \"\\\", \\\"\" + DeptName + \"\\\");\") cursor.execute(sql) finally: connection.close() def newCourses():", "Universities values(\\\"\" + Name + \"\\\", \\\"\" + RFName + \"\\\", \\\"\" +", "connection.cursor() as cursor: sql = (\"insert into Courses values(\\\"\" + CourseName + \"\\\",", "values(\" + ISBN + \", \\\"\" + CourseName + \"\\\", \\\"\" + UniversityName", "UniversityName + \"\\\");\") cursor.execute(sql) finally: connection.close() def adminModuleMain(): welcomeMsg = (\"---------------------\\nAdmin Module\\n---------------------\") mainOptionsMsg", "cursor: sql = (\"insert into Universities values(\\\"\" + Name + \"\\\", \\\"\" +", "input(\"Enter date published: \") Quantity = input(\"Enter quantity: \") Publisher = input(\"Enter publisher:", "the name of the department: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor()", "Departments values(\\\"\" + UniversityName + \"\\\", \\\"\" + DeptName + \"\\\");\") cursor.execute(sql) finally:", "\") Author = input(\"Author: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as", "options: 1) Create a new book with inventory 2) Create a new university", "= input(\"Enter date published: \") Quantity = input(\"Enter quantity: \") Publisher = input(\"Enter", "Language = input(\"Language: \") Category = input(\"Category: \") Author = input(\"Author: \") connection", "\"\\\", \\\"\" + City + \"\\\", \\\"\" + State + \"\\\", \\\"\" +", "Name + \"\\\", \\\"\" + RFName + \"\\\", \\\"\" + RLName + \"\\\",", "= input(\"Language: \") Category = input(\"Category: \") Author = input(\"Author: \") connection =", "connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into CourseReq values(\" +", "with connection.cursor() as cursor: sql = (\"insert into Departments values(\\\"\" + UniversityName +", "the course: \") UniversityName = input(\"Enter the name of the university: \") DeptName", "= getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into Universities", "\") UniversityName = input(\"Enter the name of the university: \") ISBN = input(\"Enter", "userInput = int(input(mainOptionsMsg)) print(\"\\n\") if (userInput == 1): newBook() elif (userInput == 2):", "State = input(\"State: \") Country = input(\"Country: \") connection = getConnection() connection.autocommit(True) try:", "input(\"State: \") Country = input(\"Country: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor()", "import pymysql import cgi def getConnection(): return pymysql.connect(host='localhost', user='root', password='<PASSWORD>', db='BookFetch') def newBook():", "as cursor: sql = (\"insert into Courses values(\\\"\" + CourseName + \"\\\", \\\"\"", 
"+ Category + \"\\\", \\\"\" + Author + \"\\\");\") cursor.execute(sql) finally: connection.close() def", "7) Quit Enter [1-7]: \"\"\") invalidInputMsg = \"Invalid input, please enter a valid", "connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into Departments values(\\\"\" +", "+ \"\\\");\") cursor.execute(sql) finally: connection.close() def newUniversity(): Name = input(\"Enter the name of", "\"\\\", \" + Quantity + \", \\\"\" + Publisher + \"\\\", \" +", "\\\"\" + DPublished + \"\\\", \" + Quantity + \", \\\"\" + Publisher", "(\"insert into BookDetails values (\\\"\" + Title + \"\\\", \" + ISBN +", "connection.cursor() as cursor: sql = (\"insert into BookDetails values (\\\"\" + Title +", "= input(\"Enter publisher: \") Edition = input(\"Edition: \") Language = input(\"Language: \") Category", "Publisher = input(\"Enter publisher: \") Edition = input(\"Edition: \") Language = input(\"Language: \")", "print(\"\\n\") while(userInput < 1 or userInput > 7): print(invalidInputMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\")", "\" + ISBN13 + \", \\\"\" + DPublished + \"\\\", \" + Quantity", "invalidInputMsg = \"Invalid input, please enter a valid input.\" print(welcomeMsg) userInput = int(input(mainOptionsMsg))", "isbn of the new book: \") ISBN13 = input (\"Enter the isbn 13:", "quantity: \") Publisher = input(\"Enter publisher: \") Edition = input(\"Edition: \") Language =", "a new courses 5) Create a new book associations 6) Return 7) Quit", "with connection.cursor() as cursor: sql = (\"insert into BookDetails values (\\\"\" + Title", "the name of the course: \") UniversityName = input(\"Enter the name of the", "== 3): newDepartment() elif (userInput == 4): newCourses() elif (userInput == 5): newBAssociation()", "= getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into Courses", "of the new book: \") ISBN13 = input (\"Enter the isbn 13: \")", "pymysql import cgi def getConnection(): return pymysql.connect(host='localhost', user='root', password='<PASSWORD>', db='BookFetch') def newBook(): Title", "\" + Edition + \", \\\"\" + Language + \"\\\", \\\"\" + Category", "input(\"Edition: \") Language = input(\"Language: \") Category = input(\"Category: \") Author = input(\"Author:", "newDepartment(): UniversityName = input(\"Enter the name of the university: \") DeptName = input(\"Enter", "+ \"\\\", \\\"\" + Author + \"\\\");\") cursor.execute(sql) finally: connection.close() def newUniversity(): Name", "ISBN13 + \", \\\"\" + DPublished + \"\\\", \" + Quantity + \",", "input(\"Enter the title of the new book: \") ISBN = input(\"Enter the isbn", "\"\\\");\") cursor.execute(sql) finally: connection.close() def newBAssociation(): print(\"All of these are foreign key constraints:", "\") Edition = input(\"Edition: \") Language = input(\"Language: \") Category = input(\"Category: \")", "\"\\\");\") cursor.execute(sql) finally: connection.close() def newUniversity(): Name = input(\"Enter the name of the", "as cursor: sql = (\"insert into BookDetails values (\\\"\" + Title + \"\\\",", "\\\"\" + DeptName + \"\\\");\") cursor.execute(sql) finally: connection.close() def newBAssociation(): print(\"All of these", "a new book with inventory 2) Create a new university 3) Create a", "cursor.execute(sql) finally: connection.close() def newUniversity(): Name = input(\"Enter the name of the university:", "= input(\"Enter the name of the course: \") UniversityName = input(\"Enter the name", "the isbn of the book: \") 
connection = getConnection() connection.autocommit(True) try: with connection.cursor()", "getConnection(): return pymysql.connect(host='localhost', user='root', password='<PASSWORD>', db='BookFetch') def newBook(): Title = input(\"Enter the title", "name of the representative: \") Street = input(\"Street: \") City = input(\"City: \")", "+ \"\\\");\") cursor.execute(sql) finally: connection.close() def newDepartment(): UniversityName = input(\"Enter the name of", "new book with inventory 2) Create a new university 3) Create a new", "= int(input(mainOptionsMsg)) print(\"\\n\") if (userInput == 1): newBook() elif (userInput == 2): newUniversity()", "DeptName + \"\\\");\") cursor.execute(sql) finally: connection.close() def newCourses(): CourseName = input(\"Enter the name", "the book: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql", "representative: \") RLName = input(\"Last name of the representative: \") Street = input(\"Street:", "of the department: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as cursor:", "newUniversity(): Name = input(\"Enter the name of the university: \") RFName = input(\"First", "+ \"\\\", \" + ISBN + \", \" + ISBN13 + \", \\\"\"", "the university: \") ISBN = input(\"Enter the isbn of the book: \") connection", "cursor.execute(sql) finally: connection.close() def newCourses(): CourseName = input(\"Enter the name of the course:", "please enter a valid input.\" print(welcomeMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") while(userInput < 1", "RLName + \"\\\", \\\"\" + Street + \"\\\", \\\"\" + City + \"\\\",", "def newDepartment(): UniversityName = input(\"Enter the name of the university: \") DeptName =", "\"\\\", \\\"\" + Category + \"\\\", \\\"\" + Author + \"\\\");\") cursor.execute(sql) finally:", "\\\"\" + RFName + \"\\\", \\\"\" + RLName + \"\\\", \\\"\" + Street", "cursor: sql = (\"insert into Courses values(\\\"\" + CourseName + \"\\\", \\\"\" +", "= input(\"Enter the isbn of the book: \") connection = getConnection() connection.autocommit(True) try:", "= (\"---------------------\\nAdmin Module\\n---------------------\") mainOptionsMsg = ( \"\"\"Here are your options: 1) Create a", "the new book: \") ISBN13 = input (\"Enter the isbn 13: \") DPublished", "\") City = input(\"City: \") State = input(\"State: \") Country = input(\"Country: \")", "university 3) Create a new department 4) Create a new courses 5) Create", "\"\\\", \\\"\" + UniversityName + \"\\\", \\\"\" + DeptName + \"\\\");\") cursor.execute(sql) finally:", "course: \") UniversityName = input(\"Enter the name of the university: \") DeptName =", "(\"Enter the isbn 13: \") DPublished = input(\"Enter date published: \") Quantity =", "\"\\\");\") cursor.execute(sql) finally: connection.close() def newDepartment(): UniversityName = input(\"Enter the name of the", "representative: \") Street = input(\"Street: \") City = input(\"City: \") State = input(\"State:", "UniversityName = input(\"Enter the name of the university: \") DeptName = input(\"Enter the", "\", \\\"\" + Language + \"\\\", \\\"\" + Category + \"\\\", \\\"\" +", "= (\"insert into Departments values(\\\"\" + UniversityName + \"\\\", \\\"\" + DeptName +", "1 or userInput > 7): print(invalidInputMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") if (userInput ==", "\\\"\" + Country + \"\\\");\") cursor.execute(sql) finally: connection.close() def newDepartment(): UniversityName = input(\"Enter", "+ \"\\\", \\\"\" + 
UniversityName + \"\\\");\") cursor.execute(sql) finally: connection.close() def adminModuleMain(): welcomeMsg", "publisher: \") Edition = input(\"Edition: \") Language = input(\"Language: \") Category = input(\"Category:", "Module\\n---------------------\") mainOptionsMsg = ( \"\"\"Here are your options: 1) Create a new book", "a new book associations 6) Return 7) Quit Enter [1-7]: \"\"\") invalidInputMsg =", "input(\"Enter quantity: \") Publisher = input(\"Enter publisher: \") Edition = input(\"Edition: \") Language", "\"\"\"Here are your options: 1) Create a new book with inventory 2) Create", "Name = input(\"Enter the name of the university: \") RFName = input(\"First name", "DeptName + \"\\\");\") cursor.execute(sql) finally: connection.close() def newBAssociation(): print(\"All of these are foreign", "Quit Enter [1-7]: \"\"\") invalidInputMsg = \"Invalid input, please enter a valid input.\"", "\") ISBN = input(\"Enter the isbn of the new book: \") ISBN13 =", "foreign key constraints: \") CourseName = input(\"Enter the name of the course: \")", "getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into CourseReq values(\"", "Street = input(\"Street: \") City = input(\"City: \") State = input(\"State: \") Country", "newBAssociation(): print(\"All of these are foreign key constraints: \") CourseName = input(\"Enter the", "finally: connection.close() def newUniversity(): Name = input(\"Enter the name of the university: \")", "cgi def getConnection(): return pymysql.connect(host='localhost', user='root', password='<PASSWORD>', db='BookFetch') def newBook(): Title = input(\"Enter", "as cursor: sql = (\"insert into Universities values(\\\"\" + Name + \"\\\", \\\"\"", "department 4) Create a new courses 5) Create a new book associations 6)", "of the representative: \") Street = input(\"Street: \") City = input(\"City: \") State", "(userInput == 4): newCourses() elif (userInput == 5): newBAssociation() elif (userInput == 6):", "department: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql =", "\"\\\", \\\"\" + Street + \"\\\", \\\"\" + City + \"\\\", \\\"\" +", "+ \"\\\", \\\"\" + RFName + \"\\\", \\\"\" + RLName + \"\\\", \\\"\"", "\") DPublished = input(\"Enter date published: \") Quantity = input(\"Enter quantity: \") Publisher", "RFName + \"\\\", \\\"\" + RLName + \"\\\", \\\"\" + Street + \"\\\",", "values(\\\"\" + UniversityName + \"\\\", \\\"\" + DeptName + \"\\\");\") cursor.execute(sql) finally: connection.close()", "connection.cursor() as cursor: sql = (\"insert into Universities values(\\\"\" + Name + \"\\\",", "getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into BookDetails values", "+ Edition + \", \\\"\" + Language + \"\\\", \\\"\" + Category +", "= input(\"Last name of the representative: \") Street = input(\"Street: \") City =", "name of the department: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as", "Return 7) Quit Enter [1-7]: \"\"\") invalidInputMsg = \"Invalid input, please enter a", "+ ISBN + \", \" + ISBN13 + \", \\\"\" + DPublished +", "userInput = int(input(mainOptionsMsg)) print(\"\\n\") while(userInput < 1 or userInput > 7): print(invalidInputMsg) userInput", "+ RLName + \"\\\", \\\"\" + Street + \"\\\", \\\"\" + City +", "+ DPublished + \"\\\", \" + Quantity + \", \\\"\" + Publisher +", "cursor: sql = (\"insert into Departments values(\\\"\" + UniversityName + 
\"\\\", \\\"\" +", "= input(\"Street: \") City = input(\"City: \") State = input(\"State: \") Country =", "int(input(mainOptionsMsg)) print(\"\\n\") while(userInput < 1 or userInput > 7): print(invalidInputMsg) userInput = int(input(mainOptionsMsg))", "6) Return 7) Quit Enter [1-7]: \"\"\") invalidInputMsg = \"Invalid input, please enter", "State + \"\\\", \\\"\" + Country + \"\\\");\") cursor.execute(sql) finally: connection.close() def newDepartment():", "the representative: \") Street = input(\"Street: \") City = input(\"City: \") State =", "= input(\"Category: \") Author = input(\"Author: \") connection = getConnection() connection.autocommit(True) try: with", "import randint import datetime import pymysql import cgi def getConnection(): return pymysql.connect(host='localhost', user='root',", "newCourses() elif (userInput == 5): newBAssociation() elif (userInput == 6): return elif (userInput", "def newBAssociation(): print(\"All of these are foreign key constraints: \") CourseName = input(\"Enter", "Category = input(\"Category: \") Author = input(\"Author: \") connection = getConnection() connection.autocommit(True) try:", "\\\"\" + Language + \"\\\", \\\"\" + Category + \"\\\", \\\"\" + Author", "\"\\\");\") cursor.execute(sql) finally: connection.close() def adminModuleMain(): welcomeMsg = (\"---------------------\\nAdmin Module\\n---------------------\") mainOptionsMsg = (", "connection.close() def newUniversity(): Name = input(\"Enter the name of the university: \") RFName", "(\"insert into Courses values(\\\"\" + CourseName + \"\\\", \\\"\" + UniversityName + \"\\\",", "name of the university: \") DeptName = input(\"Enter the name of the department:", "\\\"\" + City + \"\\\", \\\"\" + State + \"\\\", \\\"\" + Country", "of the university: \") DeptName = input(\"Enter the name of the department: \")", "int(input(mainOptionsMsg)) print(\"\\n\") if (userInput == 1): newBook() elif (userInput == 2): newUniversity() elif", "= input(\"Enter the name of the department: \") connection = getConnection() connection.autocommit(True) try:", "university: \") RFName = input(\"First name of the representative: \") RLName = input(\"Last", "cursor: sql = (\"insert into BookDetails values (\\\"\" + Title + \"\\\", \"", "ISBN = input(\"Enter the isbn of the new book: \") ISBN13 = input", "RFName = input(\"First name of the representative: \") RLName = input(\"Last name of", "2): newUniversity() elif (userInput == 3): newDepartment() elif (userInput == 4): newCourses() elif", "\\\"\" + UniversityName + \"\\\");\") cursor.execute(sql) finally: connection.close() def adminModuleMain(): welcomeMsg = (\"---------------------\\nAdmin", "associations 6) Return 7) Quit Enter [1-7]: \"\"\") invalidInputMsg = \"Invalid input, please", "(\"insert into CourseReq values(\" + ISBN + \", \\\"\" + CourseName + \"\\\",", "newBook(): Title = input(\"Enter the title of the new book: \") ISBN =", "input(\"Enter the name of the university: \") ISBN = input(\"Enter the isbn of", "\") CourseName = input(\"Enter the name of the course: \") UniversityName = input(\"Enter", "as cursor: sql = (\"insert into CourseReq values(\" + ISBN + \", \\\"\"", "DPublished + \"\\\", \" + Quantity + \", \\\"\" + Publisher + \"\\\",", "= (\"insert into BookDetails values (\\\"\" + Title + \"\\\", \" + ISBN", "input(\"Country: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql =", "of these are foreign key constraints: \") CourseName = input(\"Enter the name of", "\") Country = 
input(\"Country: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as", "print(\"\\n\") if (userInput == 1): newBook() elif (userInput == 2): newUniversity() elif (userInput", "+ CourseName + \"\\\", \\\"\" + UniversityName + \"\\\", \\\"\" + DeptName +", "def newUniversity(): Name = input(\"Enter the name of the university: \") RFName =", "finally: connection.close() def newCourses(): CourseName = input(\"Enter the name of the course: \")", "finally: connection.close() def adminModuleMain(): welcomeMsg = (\"---------------------\\nAdmin Module\\n---------------------\") mainOptionsMsg = ( \"\"\"Here are", "\") Language = input(\"Language: \") Category = input(\"Category: \") Author = input(\"Author: \")", "name of the university: \") ISBN = input(\"Enter the isbn of the book:", "newUniversity() elif (userInput == 3): newDepartment() elif (userInput == 4): newCourses() elif (userInput", "+ \", \\\"\" + DPublished + \"\\\", \" + Quantity + \", \\\"\"", "def newBook(): Title = input(\"Enter the title of the new book: \") ISBN", "CourseReq values(\" + ISBN + \", \\\"\" + CourseName + \"\\\", \\\"\" +", "connection.close() def adminModuleMain(): welcomeMsg = (\"---------------------\\nAdmin Module\\n---------------------\") mainOptionsMsg = ( \"\"\"Here are your", "+ Name + \"\\\", \\\"\" + RFName + \"\\\", \\\"\" + RLName +", "values (\\\"\" + Title + \"\\\", \" + ISBN + \", \" +", "\\\"\" + Publisher + \"\\\", \" + Edition + \", \\\"\" + Language", "= (\"insert into Universities values(\\\"\" + Name + \"\\\", \\\"\" + RFName +", "book: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql =", "of the university: \") ISBN = input(\"Enter the isbn of the book: \")", "getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into Departments values(\\\"\"", "\\\"\" + Author + \"\\\");\") cursor.execute(sql) finally: connection.close() def newUniversity(): Name = input(\"Enter", "datetime import pymysql import cgi def getConnection(): return pymysql.connect(host='localhost', user='root', password='<PASSWORD>', db='BookFetch') def", "into Universities values(\\\"\" + Name + \"\\\", \\\"\" + RFName + \"\\\", \\\"\"", "Create a new university 3) Create a new department 4) Create a new", "Edition = input(\"Edition: \") Language = input(\"Language: \") Category = input(\"Category: \") Author", "cursor: sql = (\"insert into CourseReq values(\" + ISBN + \", \\\"\" +", "\"\"\") invalidInputMsg = \"Invalid input, please enter a valid input.\" print(welcomeMsg) userInput =", "+ Author + \"\\\");\") cursor.execute(sql) finally: connection.close() def newUniversity(): Name = input(\"Enter the", "+ Country + \"\\\");\") cursor.execute(sql) finally: connection.close() def newDepartment(): UniversityName = input(\"Enter the", "try: with connection.cursor() as cursor: sql = (\"insert into Departments values(\\\"\" + UniversityName", "BookDetails values (\\\"\" + Title + \"\\\", \" + ISBN + \", \"", "Street + \"\\\", \\\"\" + City + \"\\\", \\\"\" + State + \"\\\",", "(userInput == 1): newBook() elif (userInput == 2): newUniversity() elif (userInput == 3):", "elif (userInput == 2): newUniversity() elif (userInput == 3): newDepartment() elif (userInput ==", "try: with connection.cursor() as cursor: sql = (\"insert into BookDetails values (\\\"\" +", "+ CourseName + \"\\\", \\\"\" + UniversityName + \"\\\");\") cursor.execute(sql) finally: connection.close() def", "+ City + 
\"\\\", \\\"\" + State + \"\\\", \\\"\" + Country +", "== 4): newCourses() elif (userInput == 5): newBAssociation() elif (userInput == 6): return", "+ \"\\\", \\\"\" + RLName + \"\\\", \\\"\" + Street + \"\\\", \\\"\"", "Create a new department 4) Create a new courses 5) Create a new", "= input(\"Enter the name of the university: \") ISBN = input(\"Enter the isbn", "\\\"\" + DeptName + \"\\\");\") cursor.execute(sql) finally: connection.close() def newCourses(): CourseName = input(\"Enter", "the title of the new book: \") ISBN = input(\"Enter the isbn of", "input(\"Enter the name of the department: \") connection = getConnection() connection.autocommit(True) try: with", "\" + ISBN + \", \" + ISBN13 + \", \\\"\" + DPublished", "valid input.\" print(welcomeMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") while(userInput < 1 or userInput >", "sql = (\"insert into Departments values(\\\"\" + UniversityName + \"\\\", \\\"\" + DeptName", "5): newBAssociation() elif (userInput == 6): return elif (userInput == 7): quit() adminModuleMain()", "\") DeptName = input(\"Enter the name of the department: \") connection = getConnection()", "of the university: \") RFName = input(\"First name of the representative: \") RLName", "\"\\\", \\\"\" + DeptName + \"\\\");\") cursor.execute(sql) finally: connection.close() def newCourses(): CourseName =", "password='<PASSWORD>', db='BookFetch') def newBook(): Title = input(\"Enter the title of the new book:", "sql = (\"insert into Universities values(\\\"\" + Name + \"\\\", \\\"\" + RFName", "+ \"\\\", \\\"\" + State + \"\\\", \\\"\" + Country + \"\\\");\") cursor.execute(sql)", "DeptName = input(\"Enter the name of the department: \") connection = getConnection() connection.autocommit(True)", "with connection.cursor() as cursor: sql = (\"insert into Universities values(\\\"\" + Name +", "the university: \") RFName = input(\"First name of the representative: \") RLName =", "5) Create a new book associations 6) Return 7) Quit Enter [1-7]: \"\"\")", "+ DeptName + \"\\\");\") cursor.execute(sql) finally: connection.close() def newBAssociation(): print(\"All of these are", "\", \\\"\" + CourseName + \"\\\", \\\"\" + UniversityName + \"\\\");\") cursor.execute(sql) finally:", "newDepartment() elif (userInput == 4): newCourses() elif (userInput == 5): newBAssociation() elif (userInput", "\"\\\", \" + ISBN + \", \" + ISBN13 + \", \\\"\" +", "ISBN = input(\"Enter the isbn of the book: \") connection = getConnection() connection.autocommit(True)", "+ \", \" + ISBN13 + \", \\\"\" + DPublished + \"\\\", \"", "2) Create a new university 3) Create a new department 4) Create a", "input, please enter a valid input.\" print(welcomeMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") while(userInput <", "+ \"\\\", \\\"\" + Street + \"\\\", \\\"\" + City + \"\\\", \\\"\"", "\"\\\");\") cursor.execute(sql) finally: connection.close() def newCourses(): CourseName = input(\"Enter the name of the", "input(\"Category: \") Author = input(\"Author: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor()", "book: \") ISBN = input(\"Enter the isbn of the new book: \") ISBN13", "\") Publisher = input(\"Enter publisher: \") Edition = input(\"Edition: \") Language = input(\"Language:", "\"\\\", \\\"\" + Country + \"\\\");\") cursor.execute(sql) finally: connection.close() def newDepartment(): UniversityName =", "= input(\"Enter the title of the new book: \") ISBN = input(\"Enter the", "if (userInput == 1): newBook() elif (userInput == 2): 
newUniversity() elif (userInput ==", "+ \"\\\", \\\"\" + DeptName + \"\\\");\") cursor.execute(sql) finally: connection.close() def newBAssociation(): print(\"All", "of the course: \") UniversityName = input(\"Enter the name of the university: \")", "def newCourses(): CourseName = input(\"Enter the name of the course: \") UniversityName =", "newBook() elif (userInput == 2): newUniversity() elif (userInput == 3): newDepartment() elif (userInput", "= input(\"Edition: \") Language = input(\"Language: \") Category = input(\"Category: \") Author =", "\") Category = input(\"Category: \") Author = input(\"Author: \") connection = getConnection() connection.autocommit(True)", "ISBN + \", \" + ISBN13 + \", \\\"\" + DPublished + \"\\\",", "import datetime import pymysql import cgi def getConnection(): return pymysql.connect(host='localhost', user='root', password='<PASSWORD>', db='BookFetch')", "input (\"Enter the isbn 13: \") DPublished = input(\"Enter date published: \") Quantity", "== 5): newBAssociation() elif (userInput == 6): return elif (userInput == 7): quit()", "your options: 1) Create a new book with inventory 2) Create a new", "= \"Invalid input, please enter a valid input.\" print(welcomeMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\")", "published: \") Quantity = input(\"Enter quantity: \") Publisher = input(\"Enter publisher: \") Edition", "input(\"Last name of the representative: \") Street = input(\"Street: \") City = input(\"City:", "(userInput == 3): newDepartment() elif (userInput == 4): newCourses() elif (userInput == 5):", "+ \"\\\", \\\"\" + UniversityName + \"\\\", \\\"\" + DeptName + \"\\\");\") cursor.execute(sql)", "new book associations 6) Return 7) Quit Enter [1-7]: \"\"\") invalidInputMsg = \"Invalid", "Category + \"\\\", \\\"\" + Author + \"\\\");\") cursor.execute(sql) finally: connection.close() def newUniversity():", "elif (userInput == 3): newDepartment() elif (userInput == 4): newCourses() elif (userInput ==", "\") ISBN13 = input (\"Enter the isbn 13: \") DPublished = input(\"Enter date", "= input(\"Enter the name of the university: \") RFName = input(\"First name of", "input(\"Author: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql =", "Edition + \", \\\"\" + Language + \"\\\", \\\"\" + Category + \"\\\",", "getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into Universities values(\\\"\"", "from random import randint import datetime import pymysql import cgi def getConnection(): return", "connection.cursor() as cursor: sql = (\"insert into CourseReq values(\" + ISBN + \",", "Country = input(\"Country: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as cursor:", "the isbn of the new book: \") ISBN13 = input (\"Enter the isbn", "+ \", \\\"\" + Publisher + \"\\\", \" + Edition + \", \\\"\"", "are foreign key constraints: \") CourseName = input(\"Enter the name of the course:", "City + \"\\\", \\\"\" + State + \"\\\", \\\"\" + Country + \"\\\");\")", "sql = (\"insert into Courses values(\\\"\" + CourseName + \"\\\", \\\"\" + UniversityName", "university: \") DeptName = input(\"Enter the name of the department: \") connection =", "3) Create a new department 4) Create a new courses 5) Create a", "university: \") ISBN = input(\"Enter the isbn of the book: \") connection =", "isbn of the book: \") connection = getConnection() connection.autocommit(True) try: with connection.cursor() as", "UniversityName + 
\"\\\", \\\"\" + DeptName + \"\\\");\") cursor.execute(sql) finally: connection.close() def newBAssociation():", "== 1): newBook() elif (userInput == 2): newUniversity() elif (userInput == 3): newDepartment()", "+ Quantity + \", \\\"\" + Publisher + \"\\\", \" + Edition +", "ISBN13 = input (\"Enter the isbn 13: \") DPublished = input(\"Enter date published:", "(\"insert into Departments values(\\\"\" + UniversityName + \"\\\", \\\"\" + DeptName + \"\\\");\")", "\") Street = input(\"Street: \") City = input(\"City: \") State = input(\"State: \")", "course: \") UniversityName = input(\"Enter the name of the university: \") ISBN =", "of the new book: \") ISBN = input(\"Enter the isbn of the new", "print(welcomeMsg) userInput = int(input(mainOptionsMsg)) print(\"\\n\") while(userInput < 1 or userInput > 7): print(invalidInputMsg)", "the new book: \") ISBN = input(\"Enter the isbn of the new book:", "connection = getConnection() connection.autocommit(True) try: with connection.cursor() as cursor: sql = (\"insert into", "4) Create a new courses 5) Create a new book associations 6) Return", "input(\"Enter the name of the university: \") DeptName = input(\"Enter the name of", "\") ISBN = input(\"Enter the isbn of the book: \") connection = getConnection()", "(\"insert into Universities values(\\\"\" + Name + \"\\\", \\\"\" + RFName + \"\\\",", "(userInput == 2): newUniversity() elif (userInput == 3): newDepartment() elif (userInput == 4):", "\", \\\"\" + DPublished + \"\\\", \" + Quantity + \", \\\"\" +", "(\"---------------------\\nAdmin Module\\n---------------------\") mainOptionsMsg = ( \"\"\"Here are your options: 1) Create a new", "\"\\\", \\\"\" + State + \"\\\", \\\"\" + Country + \"\\\");\") cursor.execute(sql) finally:", "try: with connection.cursor() as cursor: sql = (\"insert into Universities values(\\\"\" + Name" ]
[ "np.arctan(np.sqrt(1.0 - ql**2.0) * sx_r / (psi + rc)) dy_tmp = (re *", "/ (psi + rc * ql**2.0)) dx = dx_tmp * cs - dy_tmp", "sy * cs psi = np.sqrt(ql**2.0 * (rc**2.0 + sx_r**2.0) + sy_r**2.0) dx_tmp", "potential_nie(x0, y0, theta, ql, re, rc, ext_shears, ext_angle, ext_kappa, x, y): tr =", "sx + sn2 * sy) dy2 = ext_shears * (sn2 * sx -", "= ext_shears * (cs2 * sx + sn2 * sy) dy2 = ext_shears", "* (x - x0) / r, res * (y - y0) / r", "sy) # external kappa dx3 = ext_kappa * sx dy3 = ext_kappa *", "shear tr2 = np.pi * (ext_angle / 180.0) cs2 = np.cos(2.0 * tr2)", "sn + sy * cs psi = np.sqrt(ql**2.0 * (rc**2.0 + sx_r**2.0) +", "ql**2.0)) * \\ np.arctan(np.sqrt(1.0 - ql**2.0) * sx_r / (psi + rc)) dy_tmp", "__name__ == '__main__': #import pylab as pl #r = np.linspace(0.0, 3.0, 100) #y", "* (theta / 180.0) # + np.pi / 2.0 sx = x -", "y0 cs = np.cos(tr) sn = np.sin(tr) sx_r = sx * cs +", "(sn2 * sx - cs2 * sy) # external kappa dx3 = ext_kappa", "SIE lens model tr = np.pi * (theta / 180.0) # + np.pi", "ql, re, rc, ext_shears, ext_angle, ext_kappa, x, y): tr = np.pi * (theta", "sn2 = np.sin(2.0 * tr2) dx2 = ext_shears * (cs2 * sx +", "dx2 + dx3, dy + dy2 + dy3 def potential_nie(x0, y0, theta, ql,", "- y0)**2.0) res = re * (r - np.sqrt(a * a + r", "+ dx3, dy + dy2 + dy3 def potential_nie(x0, y0, theta, ql, re,", "y0) / r def deflection_nie(x0, y0, theta, ql, re, rc, ext_shears, ext_angle, ext_kappa,", "* dy_tmp - 0.5 * re * \\ np.sqrt(ql) * rc * np.log((psi", "- y0) / r def deflection_nie(x0, y0, theta, ql, re, rc, ext_shears, ext_angle,", "- y0)**2.0) res = re / r * (np.sqrt(rc * rc + r", "sy + 0.5 * cs2 * (sx**2.0 - sy**2.0)) # external kappa pot_kaps", "\\ np.arctanh(np.sqrt(1.0 - ql**2.0) * sy_r / (psi + rc * ql**2.0)) pot_SIE", "dx_tmp + sy_r * dy_tmp - 0.5 * re * \\ np.sqrt(ql) *", "dx + dx2 + dx3, dy + dy2 + dy3 def potential_nie(x0, y0,", "0.5 return pot_SIE + pot_exts + pot_kaps def pot_sub_pJaffe(x0, y0, re, a, x,", "* sn + dy_tmp * cs # external shear tr2 = np.pi *", "180.0) # + np.pi / 2.0 sx = x - x0 sy =", "(r - np.sqrt(a * a + r * r) + a) + re", "ext_kappa * (sx**2.0 + sy**2.0) * 0.5 return pot_SIE + pot_exts + pot_kaps", "deflection_nie(x0, y0, theta, ql, re, rc, ext_shears, ext_angle, ext_kappa, x, y): # SIE", "np.log((a + np.sqrt(a * a + r * r)) / (2.0 * a))", "* sn dy = dx_tmp * sn + dy_tmp * cs # external", "* (sn2 * sx * sy + 0.5 * cs2 * (sx**2.0 -", "np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \\ np.arctan(np.sqrt(1.0 - ql**2.0) * sx_r /", "(np.sqrt(r * r) - np.sqrt(1.0 * 1.0 + r * r) + 1.0)", "np.sqrt(a * a + r * r) + a) return res * (x", "return res #if __name__ == '__main__': #import pylab as pl #r = np.linspace(0.0,", "* re * \\ np.sqrt(ql) * rc * np.log((psi + rc)**2.0 + (1.0", "= np.sqrt(ql**2.0 * (rc**2.0 + sx_r**2.0) + sy_r**2.0) dx_tmp = (re * np.sqrt(ql)", "rc)**2.0 + (1.0 - (ql**2.0)) * (sx_r**2.0)) # external shear tr2 = np.pi", "= np.sin(2.0 * tr2) pot_exts = ext_shears * (sn2 * sx * sy", "r * r)) / (2.0 * a)) return res #if __name__ == '__main__':", "np.arctanh(np.sqrt(1.0 - ql**2.0) * sy_r / (psi + rc * ql**2.0)) pot_SIE =", "* ql**2.0)) pot_SIE = sx_r * dx_tmp + sy_r * dy_tmp - 0.5", "+ sy * cs psi = np.sqrt(ql**2.0 * (rc**2.0 + sx_r**2.0) + sy_r**2.0)", "(re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \\ np.arctanh(np.sqrt(1.0 - ql**2.0) *", "y): # SIE lens model tr = np.pi * (theta / 180.0) #", "+ (y - y0)**2.0) res = re * (r - np.sqrt(a * a", "0.5 * re * \\ np.sqrt(ql) * rc * np.log((psi + rc)**2.0 +", "+ re * a * \\ np.log((a + np.sqrt(a * a + r", "sx_r * dx_tmp + 
sy_r * dy_tmp - 0.5 * re * \\", "x0)**2.0 + (y - y0)**2.0) res = re / r * (np.sqrt(rc *", "# + np.pi / 2.0 sx = x - x0 sy = y", "- x0)**2.0 + (y - y0)**2.0) res = re * (r - np.sqrt(a", "* sn + sy * cs psi = np.sqrt(ql**2.0 * (rc**2.0 + sx_r**2.0)", "res * (y - y0) / r def deflection_nie(x0, y0, theta, ql, re,", "y): # res = # re*(np.sqrt(s*s+r*r)-s-np.sqrt(a*a+r*r)+a)-re*s*np.log((s+np.sqrt(s*s+r*r)/(2.0*s)))+re*a*np.log((a+np.sqrt(a*a+r*r)/(2.9*a))) r = np.sqrt((x - x0)**2.0 + (y", "(psi + rc * ql**2.0)) pot_SIE = sx_r * dx_tmp + sy_r *", "sy**2.0)) # external kappa pot_kaps = ext_kappa * (sx**2.0 + sy**2.0) * 0.5", "y0)**2.0) res = re * (r - np.sqrt(a * a + r *", "dx3, dy + dy2 + dy3 def potential_nie(x0, y0, theta, ql, re, rc,", "a + r * r) + a) return res * (x - x0)", "np.sin(2.0 * tr2) pot_exts = ext_shears * (sn2 * sx * sy +", "pot_SIE + pot_exts + pot_kaps def pot_sub_pJaffe(x0, y0, re, a, x, y): #", "* (np.sqrt(r * r) - np.sqrt(1.0 * 1.0 + r * r) +", "np.sin(tr) sx_r = sx * cs + sy * sn sy_r = -sx", "sy = y - y0 cs = np.cos(tr) sn = np.sin(tr) sx_r =", "rc, ext_shears, ext_angle, ext_kappa, x, y): # SIE lens model tr = np.pi", "sy_r**2.0) dx_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \\ np.arctan(np.sqrt(1.0", "r) + a) + re * a * \\ np.log((a + np.sqrt(a *", "y0, theta, ql, re, rc, ext_shears, ext_angle, ext_kappa, x, y): # SIE lens", "return pot_SIE + pot_exts + pot_kaps def pot_sub_pJaffe(x0, y0, re, a, x, y):", "= -sx * sn + sy * cs psi = np.sqrt(ql**2.0 * (rc**2.0", "rc)) dy_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \\ np.arctanh(np.sqrt(1.0", "+ sy_r**2.0) dx_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \\", "x, y): tr = np.pi * (theta / 180.0) # + np.pi /", "+ a) + re * a * \\ np.log((a + np.sqrt(a * a", "tr2) dx2 = ext_shears * (cs2 * sx + sn2 * sy) dy2", "dx2 = ext_shears * (cs2 * sx + sn2 * sy) dy2 =", "#import pylab as pl #r = np.linspace(0.0, 3.0, 100) #y = 1.0 /", "= # re*(np.sqrt(s*s+r*r)-s-np.sqrt(a*a+r*r)+a)-re*s*np.log((s+np.sqrt(s*s+r*r)/(2.0*s)))+re*a*np.log((a+np.sqrt(a*a+r*r)/(2.9*a))) r = np.sqrt((x - x0)**2.0 + (y - y0)**2.0) res", "tr = np.pi * (theta / 180.0) # + np.pi / 2.0 sx", "cs2 * (sx**2.0 - sy**2.0)) # external kappa pot_kaps = ext_kappa * (sx**2.0", "* sx - cs2 * sy) # external kappa dx3 = ext_kappa *", "* 0.5 return pot_SIE + pot_exts + pot_kaps def pot_sub_pJaffe(x0, y0, re, a,", "ext_angle, ext_kappa, x, y): tr = np.pi * (theta / 180.0) # +", "def pot_sub_pJaffe(x0, y0, re, a, x, y): # res = # re*(np.sqrt(s*s+r*r)-s-np.sqrt(a*a+r*r)+a)-re*s*np.log((s+np.sqrt(s*s+r*r)/(2.0*s)))+re*a*np.log((a+np.sqrt(a*a+r*r)/(2.9*a))) r", "(sx**2.0 + sy**2.0) * 0.5 return pot_SIE + pot_exts + pot_kaps def pot_sub_pJaffe(x0,", "ext_kappa * sx dy3 = ext_kappa * sy return dx + dx2 +", "ext_angle, ext_kappa, x, y): # SIE lens model tr = np.pi * (theta", "sx dy3 = ext_kappa * sy return dx + dx2 + dx3, dy", "res = re * (r - np.sqrt(a * a + r * r)", "== '__main__': #import pylab as pl #r = np.linspace(0.0, 3.0, 100) #y =", "np.sqrt(a * a + r * r)) / (2.0 * a)) return res", "np.sin(2.0 * tr2) dx2 = ext_shears * (cs2 * sx + sn2 *", "= np.sqrt((x - x0)**2.0 + (y - y0)**2.0) res = re / r", "def deflection_nie(x0, y0, theta, ql, re, rc, ext_shears, ext_angle, ext_kappa, x, y): #", "dx = dx_tmp * cs - dy_tmp * sn dy = dx_tmp *", "= np.sin(tr) sx_r = sx * cs + sy * sn sy_r =", "+ sy * sn sy_r = -sx * sn + sy * cs", "= ext_kappa * (sx**2.0 + sy**2.0) * 0.5 return pot_SIE + pot_exts +", "= ext_kappa * sy return dx + dx2 + dx3, dy + dy2", "= 
np.linspace(0.0, 3.0, 100) #y = 1.0 / r * (np.sqrt(r * r)", "+ (1.0 - (ql**2.0)) * (sx_r**2.0)) # external shear tr2 = np.pi *", "* (sx**2.0 - sy**2.0)) # external kappa pot_kaps = ext_kappa * (sx**2.0 +", "+ dx2 + dx3, dy + dy2 + dy3 def potential_nie(x0, y0, theta,", "sn = np.sin(tr) sx_r = sx * cs + sy * sn sy_r", "* (r - np.sqrt(a * a + r * r) + a) +", "re, rc, a, x, y): r = np.sqrt((x - x0)**2.0 + (y -", "rc - np.sqrt(a * a + r * r) + a) return res", "* r) + a) + re * a * \\ np.log((a + np.sqrt(a", "/ 180.0) cs2 = np.cos(2.0 * tr2) sn2 = np.sin(2.0 * tr2) pot_exts", "* sy + 0.5 * cs2 * (sx**2.0 - sy**2.0)) # external kappa", "np.pi * (ext_angle / 180.0) cs2 = np.cos(2.0 * tr2) sn2 = np.sin(2.0", "sy return dx + dx2 + dx3, dy + dy2 + dy3 def", "= re * (r - np.sqrt(a * a + r * r) +", "/ r, res * (y - y0) / r def deflection_nie(x0, y0, theta,", "- np.sqrt(a * a + r * r) + a) + re *", "(re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \\ np.arctan(np.sqrt(1.0 - ql**2.0) *", "dy_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \\ np.arctanh(np.sqrt(1.0 -", "\\ np.arctanh(np.sqrt(1.0 - ql**2.0) * sy_r / (psi + rc * ql**2.0)) dx", "(1.0 - (ql**2.0)) * (sx_r**2.0)) # external shear tr2 = np.pi * (ext_angle", "* (ext_angle / 180.0) cs2 = np.cos(2.0 * tr2) sn2 = np.sin(2.0 *", "* a + r * r) + a) return res * (x -", "res = re / r * (np.sqrt(rc * rc + r * r)", "res #if __name__ == '__main__': #import pylab as pl #r = np.linspace(0.0, 3.0,", "'__main__': #import pylab as pl #r = np.linspace(0.0, 3.0, 100) #y = 1.0", "(ext_angle / 180.0) cs2 = np.cos(2.0 * tr2) sn2 = np.sin(2.0 * tr2)", "pot_exts = ext_shears * (sn2 * sx * sy + 0.5 * cs2", "* (sx_r**2.0)) # external shear tr2 = np.pi * (ext_angle / 180.0) cs2", "re / r * (np.sqrt(rc * rc + r * r) - rc", "cs # external shear tr2 = np.pi * (ext_angle / 180.0) cs2 =", "rc + r * r) - rc - np.sqrt(a * a + r", "np.sqrt(ql) * rc * np.log((psi + rc)**2.0 + (1.0 - (ql**2.0)) * (sx_r**2.0))", "dy + dy2 + dy3 def potential_nie(x0, y0, theta, ql, re, rc, ext_shears,", "ext_kappa, x, y): tr = np.pi * (theta / 180.0) # + np.pi", "dy2 = ext_shears * (sn2 * sx - cs2 * sy) # external", "r * r) + a) return res * (x - x0) / r,", "ext_shears * (cs2 * sx + sn2 * sy) dy2 = ext_shears *", "rc, ext_shears, ext_angle, ext_kappa, x, y): tr = np.pi * (theta / 180.0)", "res * (x - x0) / r, res * (y - y0) /", "* tr2) sn2 = np.sin(2.0 * tr2) pot_exts = ext_shears * (sn2 *", "= np.cos(2.0 * tr2) sn2 = np.sin(2.0 * tr2) pot_exts = ext_shears *", "<filename>pages/clean_box/images/sims_for_hoopla/pot_ext_shears_kappa.py import numpy as np def deflection_sub_pJaffe(x0, y0, re, rc, a, x, y):", "- ql**2.0)) * \\ np.arctan(np.sqrt(1.0 - ql**2.0) * sx_r / (psi + rc))", "= ext_shears * (sn2 * sx - cs2 * sy) # external kappa", "dx_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \\ np.arctan(np.sqrt(1.0 -", "0.5 * cs2 * (sx**2.0 - sy**2.0)) # external kappa pot_kaps = ext_kappa", "y0, re, a, x, y): # res = # re*(np.sqrt(s*s+r*r)-s-np.sqrt(a*a+r*r)+a)-re*s*np.log((s+np.sqrt(s*s+r*r)/(2.0*s)))+re*a*np.log((a+np.sqrt(a*a+r*r)/(2.9*a))) r = np.sqrt((x", "a + r * r) + a) + re * a * \\", "* sy return dx + dx2 + dx3, dy + dy2 + dy3", "+ pot_exts + pot_kaps def pot_sub_pJaffe(x0, y0, re, a, x, y): # res", "* tr2) dx2 = ext_shears * (cs2 * sx + sn2 * sy)", "return dx + dx2 + dx3, dy + dy2 + dy3 def potential_nie(x0,", "ql**2.0)) pot_SIE = sx_r * dx_tmp + sy_r * dy_tmp - 0.5 *", "y0, theta, ql, re, rc, ext_shears, ext_angle, ext_kappa, x, y): tr = np.pi", "np.arctanh(np.sqrt(1.0 - ql**2.0) * sy_r 
/ (psi + rc * ql**2.0)) dx =", "- ql**2.0)) * \\ np.arctanh(np.sqrt(1.0 - ql**2.0) * sy_r / (psi + rc", "/ (2.0 * a)) return res #if __name__ == '__main__': #import pylab as", "* dx_tmp + sy_r * dy_tmp - 0.5 * re * \\ np.sqrt(ql)", "#if __name__ == '__main__': #import pylab as pl #r = np.linspace(0.0, 3.0, 100)", "as pl #r = np.linspace(0.0, 3.0, 100) #y = 1.0 / r *", "+ r * r)) / (2.0 * a)) return res #if __name__ ==", "external kappa pot_kaps = ext_kappa * (sx**2.0 + sy**2.0) * 0.5 return pot_SIE", "theta, ql, re, rc, ext_shears, ext_angle, ext_kappa, x, y): tr = np.pi *", "a)) return res #if __name__ == '__main__': #import pylab as pl #r =", "lens model tr = np.pi * (theta / 180.0) # + np.pi /", "/ np.sqrt(1.0 - ql**2.0)) * \\ np.arctanh(np.sqrt(1.0 - ql**2.0) * sy_r / (psi", "sn dy = dx_tmp * sn + dy_tmp * cs # external shear", "3.0, 100) #y = 1.0 / r * (np.sqrt(r * r) - np.sqrt(1.0", "= ext_shears * (sn2 * sx * sy + 0.5 * cs2 *", "+ r * r) + a) + re * a * \\ np.log((a", "+ rc * ql**2.0)) dx = dx_tmp * cs - dy_tmp * sn", "* (rc**2.0 + sx_r**2.0) + sy_r**2.0) dx_tmp = (re * np.sqrt(ql) / np.sqrt(1.0", "cs2 = np.cos(2.0 * tr2) sn2 = np.sin(2.0 * tr2) pot_exts = ext_shears", "* \\ np.sqrt(ql) * rc * np.log((psi + rc)**2.0 + (1.0 - (ql**2.0))", "x, y): # SIE lens model tr = np.pi * (theta / 180.0)", "= sx_r * dx_tmp + sy_r * dy_tmp - 0.5 * re *", "= re / r * (np.sqrt(rc * rc + r * r) -", "* sy_r / (psi + rc * ql**2.0)) dx = dx_tmp * cs", "np.sqrt(1.0 - ql**2.0)) * \\ np.arctan(np.sqrt(1.0 - ql**2.0) * sx_r / (psi +", "= np.pi * (ext_angle / 180.0) cs2 = np.cos(2.0 * tr2) sn2 =", "* r) - np.sqrt(1.0 * 1.0 + r * r) + 1.0) #pl.plot(r,y,'k-')", "x0 sy = y - y0 cs = np.cos(tr) sn = np.sin(tr) sx_r", "tr2) pot_exts = ext_shears * (sn2 * sx * sy + 0.5 *", "sy_r / (psi + rc * ql**2.0)) pot_SIE = sx_r * dx_tmp +", "cs + sy * sn sy_r = -sx * sn + sy *", "dy3 = ext_kappa * sy return dx + dx2 + dx3, dy +", "* sx dy3 = ext_kappa * sy return dx + dx2 + dx3,", "/ r def deflection_nie(x0, y0, theta, ql, re, rc, ext_shears, ext_angle, ext_kappa, x,", "- 0.5 * re * \\ np.sqrt(ql) * rc * np.log((psi + rc)**2.0", "re, rc, ext_shears, ext_angle, ext_kappa, x, y): tr = np.pi * (theta /", "+ (y - y0)**2.0) res = re / r * (np.sqrt(rc * rc", "\\ np.log((a + np.sqrt(a * a + r * r)) / (2.0 *", "/ (psi + rc * ql**2.0)) pot_SIE = sx_r * dx_tmp + sy_r", "np.sqrt(ql**2.0 * (rc**2.0 + sx_r**2.0) + sy_r**2.0) dx_tmp = (re * np.sqrt(ql) /", "- x0)**2.0 + (y - y0)**2.0) res = re / r * (np.sqrt(rc", "(y - y0)**2.0) res = re / r * (np.sqrt(rc * rc +", "= (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \\ np.arctanh(np.sqrt(1.0 - ql**2.0)", "+ dy_tmp * cs # external shear tr2 = np.pi * (ext_angle /", "= ext_kappa * sx dy3 = ext_kappa * sy return dx + dx2", "x0)**2.0 + (y - y0)**2.0) res = re * (r - np.sqrt(a *", "tr2) sn2 = np.sin(2.0 * tr2) dx2 = ext_shears * (cs2 * sx", "/ 180.0) cs2 = np.cos(2.0 * tr2) sn2 = np.sin(2.0 * tr2) dx2", "- np.sqrt(a * a + r * r) + a) return res *", "y): tr = np.pi * (theta / 180.0) # + np.pi / 2.0", "import numpy as np def deflection_sub_pJaffe(x0, y0, re, rc, a, x, y): r", "y - y0 cs = np.cos(tr) sn = np.sin(tr) sx_r = sx *", "sn2 * sy) dy2 = ext_shears * (sn2 * sx - cs2 *", "- rc - np.sqrt(a * a + r * r) + a) return", "np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \\ np.arctanh(np.sqrt(1.0 - ql**2.0) * sy_r /", "+ r * r) - rc - np.sqrt(a * a + r *", "(psi + rc)) dy_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) *", "model tr = np.pi * (theta / 180.0) # + np.pi / 2.0", "* (np.sqrt(rc * rc + r * r) - 
rc - np.sqrt(a *", "ql, re, rc, ext_shears, ext_angle, ext_kappa, x, y): # SIE lens model tr", "tr2) sn2 = np.sin(2.0 * tr2) pot_exts = ext_shears * (sn2 * sx", "dx_tmp * sn + dy_tmp * cs # external shear tr2 = np.pi", "* sn sy_r = -sx * sn + sy * cs psi =", "a + r * r)) / (2.0 * a)) return res #if __name__", "deflection_sub_pJaffe(x0, y0, re, rc, a, x, y): r = np.sqrt((x - x0)**2.0 +", "dy_tmp - 0.5 * re * \\ np.sqrt(ql) * rc * np.log((psi +", "* rc + r * r) - rc - np.sqrt(a * a +", "= y - y0 cs = np.cos(tr) sn = np.sin(tr) sx_r = sx", "np.cos(2.0 * tr2) sn2 = np.sin(2.0 * tr2) dx2 = ext_shears * (cs2", "* r)) / (2.0 * a)) return res #if __name__ == '__main__': #import", "sy) dy2 = ext_shears * (sn2 * sx - cs2 * sy) #", "* (cs2 * sx + sn2 * sy) dy2 = ext_shears * (sn2", "sy_r / (psi + rc * ql**2.0)) dx = dx_tmp * cs -", "a, x, y): r = np.sqrt((x - x0)**2.0 + (y - y0)**2.0) res", "(2.0 * a)) return res #if __name__ == '__main__': #import pylab as pl", "ql**2.0)) * \\ np.arctanh(np.sqrt(1.0 - ql**2.0) * sy_r / (psi + rc *", "/ r * (np.sqrt(r * r) - np.sqrt(1.0 * 1.0 + r *", "/ r * (np.sqrt(rc * rc + r * r) - rc -", "r def deflection_nie(x0, y0, theta, ql, re, rc, ext_shears, ext_angle, ext_kappa, x, y):", "= 1.0 / r * (np.sqrt(r * r) - np.sqrt(1.0 * 1.0 +", "sn2 = np.sin(2.0 * tr2) pot_exts = ext_shears * (sn2 * sx *", "= np.cos(2.0 * tr2) sn2 = np.sin(2.0 * tr2) dx2 = ext_shears *", "a, x, y): # res = # re*(np.sqrt(s*s+r*r)-s-np.sqrt(a*a+r*r)+a)-re*s*np.log((s+np.sqrt(s*s+r*r)/(2.0*s)))+re*a*np.log((a+np.sqrt(a*a+r*r)/(2.9*a))) r = np.sqrt((x - x0)**2.0", "ext_shears * (sn2 * sx * sy + 0.5 * cs2 * (sx**2.0", "ql**2.0) * sy_r / (psi + rc * ql**2.0)) pot_SIE = sx_r *", "sy * sn sy_r = -sx * sn + sy * cs psi", "/ 2.0 sx = x - x0 sy = y - y0 cs", "r) + a) return res * (x - x0) / r, res *", "* sy_r / (psi + rc * ql**2.0)) pot_SIE = sx_r * dx_tmp", "sx_r / (psi + rc)) dy_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 -", "sn sy_r = -sx * sn + sy * cs psi = np.sqrt(ql**2.0", "\\ np.arctan(np.sqrt(1.0 - ql**2.0) * sx_r / (psi + rc)) dy_tmp = (re", "+ sn2 * sy) dy2 = ext_shears * (sn2 * sx - cs2", "dx_tmp * cs - dy_tmp * sn dy = dx_tmp * sn +", "def potential_nie(x0, y0, theta, ql, re, rc, ext_shears, ext_angle, ext_kappa, x, y): tr", "(rc**2.0 + sx_r**2.0) + sy_r**2.0) dx_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 -", "cs2 = np.cos(2.0 * tr2) sn2 = np.sin(2.0 * tr2) dx2 = ext_shears", "- x0) / r, res * (y - y0) / r def deflection_nie(x0,", "* sx + sn2 * sy) dy2 = ext_shears * (sn2 * sx", "pot_kaps def pot_sub_pJaffe(x0, y0, re, a, x, y): # res = # re*(np.sqrt(s*s+r*r)-s-np.sqrt(a*a+r*r)+a)-re*s*np.log((s+np.sqrt(s*s+r*r)/(2.0*s)))+re*a*np.log((a+np.sqrt(a*a+r*r)/(2.9*a)))", "a) return res * (x - x0) / r, res * (y -", "(np.sqrt(rc * rc + r * r) - rc - np.sqrt(a * a", "= (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \\ np.arctan(np.sqrt(1.0 - ql**2.0)", "cs = np.cos(tr) sn = np.sin(tr) sx_r = sx * cs + sy", "* np.log((psi + rc)**2.0 + (1.0 - (ql**2.0)) * (sx_r**2.0)) # external shear", "(x - x0) / r, res * (y - y0) / r def", "* r) - rc - np.sqrt(a * a + r * r) +", "pylab as pl #r = np.linspace(0.0, 3.0, 100) #y = 1.0 / r", "#r = np.linspace(0.0, 3.0, 100) #y = 1.0 / r * (np.sqrt(r *", "x, y): r = np.sqrt((x - x0)**2.0 + (y - y0)**2.0) res =", "* np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \\ np.arctan(np.sqrt(1.0 - ql**2.0) * sx_r", "(sx_r**2.0)) # external shear tr2 = np.pi * (ext_angle / 180.0) cs2 =", "# external kappa dx3 = ext_kappa * sx dy3 = ext_kappa * sy", "r) - np.sqrt(1.0 * 1.0 + r * r) + 1.0) 
#pl.plot(r,y,'k-') #pl.show()", "sx - cs2 * sy) # external kappa dx3 = ext_kappa * sx", "+ sy_r * dy_tmp - 0.5 * re * \\ np.sqrt(ql) * rc", "+ r * r) + a) return res * (x - x0) /", "* sx * sy + 0.5 * cs2 * (sx**2.0 - sy**2.0)) #", "= np.sin(2.0 * tr2) dx2 = ext_shears * (cs2 * sx + sn2", "sy_r = -sx * sn + sy * cs psi = np.sqrt(ql**2.0 *", "np.sqrt((x - x0)**2.0 + (y - y0)**2.0) res = re * (r -", "= np.pi * (theta / 180.0) # + np.pi / 2.0 sx =", "np def deflection_sub_pJaffe(x0, y0, re, rc, a, x, y): r = np.sqrt((x -", "y0)**2.0) res = re / r * (np.sqrt(rc * rc + r *", "np.cos(tr) sn = np.sin(tr) sx_r = sx * cs + sy * sn", "sn + dy_tmp * cs # external shear tr2 = np.pi * (ext_angle", "psi = np.sqrt(ql**2.0 * (rc**2.0 + sx_r**2.0) + sy_r**2.0) dx_tmp = (re *", "+ rc)) dy_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \\", "np.sqrt(1.0 - ql**2.0)) * \\ np.arctanh(np.sqrt(1.0 - ql**2.0) * sy_r / (psi +", "re, rc, ext_shears, ext_angle, ext_kappa, x, y): # SIE lens model tr =", "sx_r = sx * cs + sy * sn sy_r = -sx *", "np.pi / 2.0 sx = x - x0 sy = y - y0", "external kappa dx3 = ext_kappa * sx dy3 = ext_kappa * sy return", "* \\ np.arctan(np.sqrt(1.0 - ql**2.0) * sx_r / (psi + rc)) dy_tmp =", "re*(np.sqrt(s*s+r*r)-s-np.sqrt(a*a+r*r)+a)-re*s*np.log((s+np.sqrt(s*s+r*r)/(2.0*s)))+re*a*np.log((a+np.sqrt(a*a+r*r)/(2.9*a))) r = np.sqrt((x - x0)**2.0 + (y - y0)**2.0) res = re", "r)) / (2.0 * a)) return res #if __name__ == '__main__': #import pylab", "rc, a, x, y): r = np.sqrt((x - x0)**2.0 + (y - y0)**2.0)", "* cs psi = np.sqrt(ql**2.0 * (rc**2.0 + sx_r**2.0) + sy_r**2.0) dx_tmp =", "dy3 def potential_nie(x0, y0, theta, ql, re, rc, ext_shears, ext_angle, ext_kappa, x, y):", "- ql**2.0) * sx_r / (psi + rc)) dy_tmp = (re * np.sqrt(ql)", "ql**2.0) * sy_r / (psi + rc * ql**2.0)) dx = dx_tmp *", "a) + re * a * \\ np.log((a + np.sqrt(a * a +", "1.0 / r * (np.sqrt(r * r) - np.sqrt(1.0 * 1.0 + r", "(y - y0) / r def deflection_nie(x0, y0, theta, ql, re, rc, ext_shears,", "- y0 cs = np.cos(tr) sn = np.sin(tr) sx_r = sx * cs", "pot_SIE = sx_r * dx_tmp + sy_r * dy_tmp - 0.5 * re", "/ np.sqrt(1.0 - ql**2.0)) * \\ np.arctan(np.sqrt(1.0 - ql**2.0) * sx_r / (psi", "cs - dy_tmp * sn dy = dx_tmp * sn + dy_tmp *", "np.sqrt(a * a + r * r) + a) + re * a", "* \\ np.arctanh(np.sqrt(1.0 - ql**2.0) * sy_r / (psi + rc * ql**2.0))", "r = np.sqrt((x - x0)**2.0 + (y - y0)**2.0) res = re *", "ql**2.0)) dx = dx_tmp * cs - dy_tmp * sn dy = dx_tmp", "* \\ np.log((a + np.sqrt(a * a + r * r)) / (2.0", "rc * np.log((psi + rc)**2.0 + (1.0 - (ql**2.0)) * (sx_r**2.0)) # external", "ext_shears * (sn2 * sx - cs2 * sy) # external kappa dx3", "- ql**2.0) * sy_r / (psi + rc * ql**2.0)) pot_SIE = sx_r", "100) #y = 1.0 / r * (np.sqrt(r * r) - np.sqrt(1.0 *", "+ rc)**2.0 + (1.0 - (ql**2.0)) * (sx_r**2.0)) # external shear tr2 =", "* ql**2.0)) dx = dx_tmp * cs - dy_tmp * sn dy =", "(psi + rc * ql**2.0)) dx = dx_tmp * cs - dy_tmp *", "ext_shears, ext_angle, ext_kappa, x, y): tr = np.pi * (theta / 180.0) #", "+ a) return res * (x - x0) / r, res * (y", "as np def deflection_sub_pJaffe(x0, y0, re, rc, a, x, y): r = np.sqrt((x", "cs psi = np.sqrt(ql**2.0 * (rc**2.0 + sx_r**2.0) + sy_r**2.0) dx_tmp = (re", "= x - x0 sy = y - y0 cs = np.cos(tr) sn", "pot_exts + pot_kaps def pot_sub_pJaffe(x0, y0, re, a, x, y): # res =", "r = np.sqrt((x - x0)**2.0 + (y - y0)**2.0) res = re /", "def deflection_sub_pJaffe(x0, y0, re, rc, a, x, y): r = np.sqrt((x - x0)**2.0", "pot_kaps = ext_kappa * (sx**2.0 + sy**2.0) * 0.5 return pot_SIE + pot_exts", "/ (psi + rc)) 
dy_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0))", "+ np.pi / 2.0 sx = x - x0 sy = y -", "return res * (x - x0) / r, res * (y - y0)", "dy2 + dy3 def potential_nie(x0, y0, theta, ql, re, rc, ext_shears, ext_angle, ext_kappa,", "- cs2 * sy) # external kappa dx3 = ext_kappa * sx dy3", "(sx**2.0 - sy**2.0)) # external kappa pot_kaps = ext_kappa * (sx**2.0 + sy**2.0)", "dy = dx_tmp * sn + dy_tmp * cs # external shear tr2", "* tr2) pot_exts = ext_shears * (sn2 * sx * sy + 0.5", "* a + r * r) + a) + re * a *", "re * a * \\ np.log((a + np.sqrt(a * a + r *", "np.linspace(0.0, 3.0, 100) #y = 1.0 / r * (np.sqrt(r * r) -", "(sn2 * sx * sy + 0.5 * cs2 * (sx**2.0 - sy**2.0))", "# external kappa pot_kaps = ext_kappa * (sx**2.0 + sy**2.0) * 0.5 return", "- dy_tmp * sn dy = dx_tmp * sn + dy_tmp * cs", "+ sy**2.0) * 0.5 return pot_SIE + pot_exts + pot_kaps def pot_sub_pJaffe(x0, y0,", "* np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \\ np.arctanh(np.sqrt(1.0 - ql**2.0) * sy_r", "external shear tr2 = np.pi * (ext_angle / 180.0) cs2 = np.cos(2.0 *", "re, a, x, y): # res = # re*(np.sqrt(s*s+r*r)-s-np.sqrt(a*a+r*r)+a)-re*s*np.log((s+np.sqrt(s*s+r*r)/(2.0*s)))+re*a*np.log((a+np.sqrt(a*a+r*r)/(2.9*a))) r = np.sqrt((x -", "r * (np.sqrt(r * r) - np.sqrt(1.0 * 1.0 + r * r)", "/ 180.0) # + np.pi / 2.0 sx = x - x0 sy", "(y - y0)**2.0) res = re * (r - np.sqrt(a * a +", "(cs2 * sx + sn2 * sy) dy2 = ext_shears * (sn2 *", "# SIE lens model tr = np.pi * (theta / 180.0) # +", "* tr2) sn2 = np.sin(2.0 * tr2) dx2 = ext_shears * (cs2 *", "* cs # external shear tr2 = np.pi * (ext_angle / 180.0) cs2", "np.pi * (theta / 180.0) # + np.pi / 2.0 sx = x", "ql**2.0) * sx_r / (psi + rc)) dy_tmp = (re * np.sqrt(ql) /", "ext_shears, ext_angle, ext_kappa, x, y): # SIE lens model tr = np.pi *", "* a * \\ np.log((a + np.sqrt(a * a + r * r))", "180.0) cs2 = np.cos(2.0 * tr2) sn2 = np.sin(2.0 * tr2) pot_exts =", "r, res * (y - y0) / r def deflection_nie(x0, y0, theta, ql,", "2.0 sx = x - x0 sy = y - y0 cs =", "* rc * np.log((psi + rc)**2.0 + (1.0 - (ql**2.0)) * (sx_r**2.0)) #", "* sy) dy2 = ext_shears * (sn2 * sx - cs2 * sy)", "sx * sy + 0.5 * cs2 * (sx**2.0 - sy**2.0)) # external", "* cs - dy_tmp * sn dy = dx_tmp * sn + dy_tmp", "x, y): # res = # re*(np.sqrt(s*s+r*r)-s-np.sqrt(a*a+r*r)+a)-re*s*np.log((s+np.sqrt(s*s+r*r)/(2.0*s)))+re*a*np.log((a+np.sqrt(a*a+r*r)/(2.9*a))) r = np.sqrt((x - x0)**2.0 +", "sy**2.0) * 0.5 return pot_SIE + pot_exts + pot_kaps def pot_sub_pJaffe(x0, y0, re,", "sy_r * dy_tmp - 0.5 * re * \\ np.sqrt(ql) * rc *", "ext_kappa * sy return dx + dx2 + dx3, dy + dy2 +", "(ql**2.0)) * (sx_r**2.0)) # external shear tr2 = np.pi * (ext_angle / 180.0)", "theta, ql, re, rc, ext_shears, ext_angle, ext_kappa, x, y): # SIE lens model", "+ sx_r**2.0) + sy_r**2.0) dx_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0))", "res = # re*(np.sqrt(s*s+r*r)-s-np.sqrt(a*a+r*r)+a)-re*s*np.log((s+np.sqrt(s*s+r*r)/(2.0*s)))+re*a*np.log((a+np.sqrt(a*a+r*r)/(2.9*a))) r = np.sqrt((x - x0)**2.0 + (y - y0)**2.0)", "sx = x - x0 sy = y - y0 cs = np.cos(tr)", "dy_tmp * cs # external shear tr2 = np.pi * (ext_angle / 180.0)", "np.sqrt((x - x0)**2.0 + (y - y0)**2.0) res = re / r *", "= np.sqrt((x - x0)**2.0 + (y - y0)**2.0) res = re * (r", "x - x0 sy = y - y0 cs = np.cos(tr) sn =", "- x0 sy = y - y0 cs = np.cos(tr) sn = np.sin(tr)", "* (sx**2.0 + sy**2.0) * 0.5 return pot_SIE + pot_exts + pot_kaps def", "* cs2 * (sx**2.0 - sy**2.0)) # external kappa pot_kaps = ext_kappa *", "kappa pot_kaps = ext_kappa * (sx**2.0 + sy**2.0) * 0.5 return pot_SIE +", "-sx * sn + sy 
* cs psi = np.sqrt(ql**2.0 * (rc**2.0 +", "rc * ql**2.0)) dx = dx_tmp * cs - dy_tmp * sn dy", "* (sn2 * sx - cs2 * sy) # external kappa dx3 =", "y): r = np.sqrt((x - x0)**2.0 + (y - y0)**2.0) res = re", "pl #r = np.linspace(0.0, 3.0, 100) #y = 1.0 / r * (np.sqrt(r", "r) - rc - np.sqrt(a * a + r * r) + a)", "= np.cos(tr) sn = np.sin(tr) sx_r = sx * cs + sy *", "np.log((psi + rc)**2.0 + (1.0 - (ql**2.0)) * (sx_r**2.0)) # external shear tr2", "re * \\ np.sqrt(ql) * rc * np.log((psi + rc)**2.0 + (1.0 -", "pot_sub_pJaffe(x0, y0, re, a, x, y): # res = # re*(np.sqrt(s*s+r*r)-s-np.sqrt(a*a+r*r)+a)-re*s*np.log((s+np.sqrt(s*s+r*r)/(2.0*s)))+re*a*np.log((a+np.sqrt(a*a+r*r)/(2.9*a))) r =", "numpy as np def deflection_sub_pJaffe(x0, y0, re, rc, a, x, y): r =", "+ np.sqrt(a * a + r * r)) / (2.0 * a)) return", "a * \\ np.log((a + np.sqrt(a * a + r * r)) /", "sx * cs + sy * sn sy_r = -sx * sn +", "+ dy3 def potential_nie(x0, y0, theta, ql, re, rc, ext_shears, ext_angle, ext_kappa, x,", "- ql**2.0) * sy_r / (psi + rc * ql**2.0)) dx = dx_tmp", "cs2 * sy) # external kappa dx3 = ext_kappa * sx dy3 =", "\\ np.sqrt(ql) * rc * np.log((psi + rc)**2.0 + (1.0 - (ql**2.0)) *", "* cs + sy * sn sy_r = -sx * sn + sy", "* sx_r / (psi + rc)) dy_tmp = (re * np.sqrt(ql) / np.sqrt(1.0", "rc * ql**2.0)) pot_SIE = sx_r * dx_tmp + sy_r * dy_tmp -", "+ pot_kaps def pot_sub_pJaffe(x0, y0, re, a, x, y): # res = #", "* (y - y0) / r def deflection_nie(x0, y0, theta, ql, re, rc,", "x0) / r, res * (y - y0) / r def deflection_nie(x0, y0,", "+ dy2 + dy3 def potential_nie(x0, y0, theta, ql, re, rc, ext_shears, ext_angle,", "re * (r - np.sqrt(a * a + r * r) + a)", "# res = # re*(np.sqrt(s*s+r*r)-s-np.sqrt(a*a+r*r)+a)-re*s*np.log((s+np.sqrt(s*s+r*r)/(2.0*s)))+re*a*np.log((a+np.sqrt(a*a+r*r)/(2.9*a))) r = np.sqrt((x - x0)**2.0 + (y -", "r * r) - rc - np.sqrt(a * a + r * r)", "# re*(np.sqrt(s*s+r*r)-s-np.sqrt(a*a+r*r)+a)-re*s*np.log((s+np.sqrt(s*s+r*r)/(2.0*s)))+re*a*np.log((a+np.sqrt(a*a+r*r)/(2.9*a))) r = np.sqrt((x - x0)**2.0 + (y - y0)**2.0) res =", "#y = 1.0 / r * (np.sqrt(r * r) - np.sqrt(1.0 * 1.0", "- (ql**2.0)) * (sx_r**2.0)) # external shear tr2 = np.pi * (ext_angle /", "ext_kappa, x, y): # SIE lens model tr = np.pi * (theta /", "r * r) + a) + re * a * \\ np.log((a +", "= dx_tmp * sn + dy_tmp * cs # external shear tr2 =", "tr2 = np.pi * (ext_angle / 180.0) cs2 = np.cos(2.0 * tr2) sn2", "r * (np.sqrt(rc * rc + r * r) - rc - np.sqrt(a", "= sx * cs + sy * sn sy_r = -sx * sn", "dy_tmp * sn dy = dx_tmp * sn + dy_tmp * cs #", "= dx_tmp * cs - dy_tmp * sn dy = dx_tmp * sn", "180.0) cs2 = np.cos(2.0 * tr2) sn2 = np.sin(2.0 * tr2) dx2 =", "np.cos(2.0 * tr2) sn2 = np.sin(2.0 * tr2) pot_exts = ext_shears * (sn2", "+ rc * ql**2.0)) pot_SIE = sx_r * dx_tmp + sy_r * dy_tmp", "y0, re, rc, a, x, y): r = np.sqrt((x - x0)**2.0 + (y", "* a)) return res #if __name__ == '__main__': #import pylab as pl #r", "* sy) # external kappa dx3 = ext_kappa * sx dy3 = ext_kappa", "kappa dx3 = ext_kappa * sx dy3 = ext_kappa * sy return dx", "* a + r * r)) / (2.0 * a)) return res #if", "* r) + a) return res * (x - x0) / r, res", "- sy**2.0)) # external kappa pot_kaps = ext_kappa * (sx**2.0 + sy**2.0) *", "sx_r**2.0) + sy_r**2.0) dx_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) *", "# external shear tr2 = np.pi * (ext_angle / 180.0) cs2 = np.cos(2.0", "(theta / 180.0) # + np.pi / 2.0 sx = x - x0", "+ 0.5 * cs2 * (sx**2.0 - sy**2.0)) # external kappa pot_kaps =", "dx3 = ext_kappa * sx dy3 = ext_kappa * sy return dx +" ]
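A minimal usage sketch for the functions above (not part of the original file): the module name in the import and every lens-parameter value are illustrative assumptions. The last line applies the lens equation, source position = image position minus deflection.

import numpy as np
from pot_ext_shears_kappa import deflection_nie  # assumed module name

# Image-plane grid
xs, ys = np.meshgrid(np.linspace(-2.0, 2.0, 200),
                     np.linspace(-2.0, 2.0, 200))

# Illustrative NIE + external shear/kappa parameters (assumptions)
dx, dy = deflection_nie(
    0.0, 0.0,    # lens centre x0, y0
    30.0,        # position angle theta (degrees)
    0.7,         # axis ratio ql (< 1, so the sqrt(1 - ql**2) terms stay real)
    1.0, 0.1,    # Einstein radius re, core radius rc
    0.05, 10.0,  # external shear amplitude and angle (degrees)
    0.0,         # external kappa
    xs, ys)

# Lens equation: ray-trace image-plane points back to the source plane
bx, by = xs - dx, ys - dy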
[ "def get_success_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) def get_object(self): return User.objects.get(username=self.request.user.username) user_update_view = UserUpdateView.as_view()", "import FacebookOAuth2Adapter from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter from allauth.socialaccount.providers.instagram.views import", "from django.contrib.auth.mixins import LoginRequiredMixin from django.urls import reverse from django.views.generic import DetailView, ListView,", "User.objects.get(username=self.request.user.username) user_update_view = UserUpdateView.as_view() class UserRedirectView(LoginRequiredMixin, RedirectView): permanent = False def get_redirect_url(self): return", "allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter from allauth.socialaccount.providers.instagram.views", "model = User slug_field = \"username\" slug_url_kwarg = \"username\" user_list_view = UserListView.as_view() class", "user_list_view = UserListView.as_view() class UserUpdateView(LoginRequiredMixin, UpdateView): model = User fields = [\"name\"] def", "from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter from", "UserDetailView(LoginRequiredMixin, DetailView): model = User slug_field = \"username\" slug_url_kwarg = \"username\" user_detail_view =", "def get_redirect_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) user_redirect_view = UserRedirectView.as_view() # Social Apps class", "GitHubOAuth2Adapter from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter from allauth.socialaccount.providers.instagram.views import InstagramOAuth2Adapter from allauth.socialaccount.providers.twitter.views import TwitterOAuthAdapter", "DetailView): model = User slug_field = \"username\" slug_url_kwarg = \"username\" user_detail_view = UserDetailView.as_view()", "adapter_class = FacebookOAuth2Adapter class GitHubLogin(SocialLoginView): adapter_class = GitHubOAuth2Adapter class GoogleLogin(SocialLoginView): adapter_class = GoogleOAuth2Adapter", "ListView): model = User slug_field = \"username\" slug_url_kwarg = \"username\" user_list_view = UserListView.as_view()", "import GoogleOAuth2Adapter from allauth.socialaccount.providers.instagram.views import InstagramOAuth2Adapter from allauth.socialaccount.providers.twitter.views import TwitterOAuthAdapter from rest_auth.registration.views import", "class UserUpdateView(LoginRequiredMixin, UpdateView): model = User fields = [\"name\"] def get_success_url(self): return reverse(\"users:detail\",", "class UserDetailView(LoginRequiredMixin, DetailView): model = User slug_field = \"username\" slug_url_kwarg = \"username\" user_detail_view", "User fields = [\"name\"] def get_success_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) def get_object(self): return", "class UserRedirectView(LoginRequiredMixin, RedirectView): permanent = False def get_redirect_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) user_redirect_view", 
"from django.views.generic import DetailView, ListView, RedirectView, UpdateView from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter from allauth.socialaccount.providers.github.views", "class InstagramLogin(SocialLoginView): adapter_class = InstagramOAuth2Adapter class TwitterLogin(SocialLoginView): serializer_class = TwitterLoginSerializer adapter_class = TwitterOAuthAdapter", "from allauth.socialaccount.providers.twitter.views import TwitterOAuthAdapter from rest_auth.registration.views import SocialLoginView from rest_auth.social_serializers import TwitterLoginSerializer User", "adapter_class = GoogleOAuth2Adapter class InstagramLogin(SocialLoginView): adapter_class = InstagramOAuth2Adapter class TwitterLogin(SocialLoginView): serializer_class = TwitterLoginSerializer", "= User slug_field = \"username\" slug_url_kwarg = \"username\" user_detail_view = UserDetailView.as_view() class UserListView(LoginRequiredMixin,", "django.urls import reverse from django.views.generic import DetailView, ListView, RedirectView, UpdateView from allauth.socialaccount.providers.facebook.views import", "UpdateView): model = User fields = [\"name\"] def get_success_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username})", "import GitHubOAuth2Adapter from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter from allauth.socialaccount.providers.instagram.views import InstagramOAuth2Adapter from allauth.socialaccount.providers.twitter.views import", "permanent = False def get_redirect_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) user_redirect_view = UserRedirectView.as_view() #", "import SocialLoginView from rest_auth.social_serializers import TwitterLoginSerializer User = get_user_model() class UserDetailView(LoginRequiredMixin, DetailView): model", "FacebookOAuth2Adapter from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter from allauth.socialaccount.providers.instagram.views import InstagramOAuth2Adapter", "django.contrib.auth import get_user_model from django.contrib.auth.mixins import LoginRequiredMixin from django.urls import reverse from django.views.generic", "RedirectView): permanent = False def get_redirect_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) user_redirect_view = UserRedirectView.as_view()", "UserListView.as_view() class UserUpdateView(LoginRequiredMixin, UpdateView): model = User fields = [\"name\"] def get_success_url(self): return", "from django.urls import reverse from django.views.generic import DetailView, ListView, RedirectView, UpdateView from allauth.socialaccount.providers.facebook.views", "from allauth.socialaccount.providers.instagram.views import InstagramOAuth2Adapter from allauth.socialaccount.providers.twitter.views import TwitterOAuthAdapter from rest_auth.registration.views import SocialLoginView from", "import LoginRequiredMixin from django.urls import reverse from django.views.generic import DetailView, ListView, RedirectView, UpdateView", "kwargs={\"username\": self.request.user.username}) user_redirect_view = UserRedirectView.as_view() # Social Apps class FacebookLogin(SocialLoginView): adapter_class = FacebookOAuth2Adapter", "DetailView, ListView, RedirectView, UpdateView from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter from 
allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter from", "# Social Apps class FacebookLogin(SocialLoginView): adapter_class = FacebookOAuth2Adapter class GitHubLogin(SocialLoginView): adapter_class = GitHubOAuth2Adapter", "FacebookOAuth2Adapter class GitHubLogin(SocialLoginView): adapter_class = GitHubOAuth2Adapter class GoogleLogin(SocialLoginView): adapter_class = GoogleOAuth2Adapter class InstagramLogin(SocialLoginView):", "UserDetailView.as_view() class UserListView(LoginRequiredMixin, ListView): model = User slug_field = \"username\" slug_url_kwarg = \"username\"", "RedirectView, UpdateView from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter from allauth.socialaccount.providers.google.views import", "user_update_view = UserUpdateView.as_view() class UserRedirectView(LoginRequiredMixin, RedirectView): permanent = False def get_redirect_url(self): return reverse(\"users:detail\",", "GitHubOAuth2Adapter class GoogleLogin(SocialLoginView): adapter_class = GoogleOAuth2Adapter class InstagramLogin(SocialLoginView): adapter_class = InstagramOAuth2Adapter class TwitterLogin(SocialLoginView):", "GoogleOAuth2Adapter from allauth.socialaccount.providers.instagram.views import InstagramOAuth2Adapter from allauth.socialaccount.providers.twitter.views import TwitterOAuthAdapter from rest_auth.registration.views import SocialLoginView", "model = User fields = [\"name\"] def get_success_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) def", "user_detail_view = UserDetailView.as_view() class UserListView(LoginRequiredMixin, ListView): model = User slug_field = \"username\" slug_url_kwarg", "reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) def get_object(self): return User.objects.get(username=self.request.user.username) user_update_view = UserUpdateView.as_view() class UserRedirectView(LoginRequiredMixin, RedirectView):", "class GoogleLogin(SocialLoginView): adapter_class = GoogleOAuth2Adapter class InstagramLogin(SocialLoginView): adapter_class = InstagramOAuth2Adapter class TwitterLogin(SocialLoginView): serializer_class", "= UserListView.as_view() class UserUpdateView(LoginRequiredMixin, UpdateView): model = User fields = [\"name\"] def get_success_url(self):", "django.views.generic import DetailView, ListView, RedirectView, UpdateView from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter from allauth.socialaccount.providers.github.views import", "self.request.user.username}) def get_object(self): return User.objects.get(username=self.request.user.username) user_update_view = UserUpdateView.as_view() class UserRedirectView(LoginRequiredMixin, RedirectView): permanent =", "from rest_auth.social_serializers import TwitterLoginSerializer User = get_user_model() class UserDetailView(LoginRequiredMixin, DetailView): model = User", "Social Apps class FacebookLogin(SocialLoginView): adapter_class = FacebookOAuth2Adapter class GitHubLogin(SocialLoginView): adapter_class = GitHubOAuth2Adapter class", "GoogleOAuth2Adapter class InstagramLogin(SocialLoginView): adapter_class = InstagramOAuth2Adapter class TwitterLogin(SocialLoginView): serializer_class = TwitterLoginSerializer adapter_class =", "User slug_field = \"username\" slug_url_kwarg = \"username\" user_list_view = UserListView.as_view() class UserUpdateView(LoginRequiredMixin, UpdateView):", "= User slug_field = \"username\" 
slug_url_kwarg = \"username\" user_list_view = UserListView.as_view() class UserUpdateView(LoginRequiredMixin,", "TwitterOAuthAdapter from rest_auth.registration.views import SocialLoginView from rest_auth.social_serializers import TwitterLoginSerializer User = get_user_model() class", "SocialLoginView from rest_auth.social_serializers import TwitterLoginSerializer User = get_user_model() class UserDetailView(LoginRequiredMixin, DetailView): model =", "UpdateView from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter", "FacebookLogin(SocialLoginView): adapter_class = FacebookOAuth2Adapter class GitHubLogin(SocialLoginView): adapter_class = GitHubOAuth2Adapter class GoogleLogin(SocialLoginView): adapter_class =", "= FacebookOAuth2Adapter class GitHubLogin(SocialLoginView): adapter_class = GitHubOAuth2Adapter class GoogleLogin(SocialLoginView): adapter_class = GoogleOAuth2Adapter class", "LoginRequiredMixin from django.urls import reverse from django.views.generic import DetailView, ListView, RedirectView, UpdateView from", "get_redirect_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) user_redirect_view = UserRedirectView.as_view() # Social Apps class FacebookLogin(SocialLoginView):", "from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter from allauth.socialaccount.providers.instagram.views import InstagramOAuth2Adapter from", "return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) def get_object(self): return User.objects.get(username=self.request.user.username) user_update_view = UserUpdateView.as_view() class UserRedirectView(LoginRequiredMixin,", "fields = [\"name\"] def get_success_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) def get_object(self): return User.objects.get(username=self.request.user.username)", "from django.contrib.auth import get_user_model from django.contrib.auth.mixins import LoginRequiredMixin from django.urls import reverse from", "slug_field = \"username\" slug_url_kwarg = \"username\" user_list_view = UserListView.as_view() class UserUpdateView(LoginRequiredMixin, UpdateView): model", "GitHubLogin(SocialLoginView): adapter_class = GitHubOAuth2Adapter class GoogleLogin(SocialLoginView): adapter_class = GoogleOAuth2Adapter class InstagramLogin(SocialLoginView): adapter_class =", "return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) user_redirect_view = UserRedirectView.as_view() # Social Apps class FacebookLogin(SocialLoginView): adapter_class", "allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter from allauth.socialaccount.providers.instagram.views import InstagramOAuth2Adapter from allauth.socialaccount.providers.twitter.views", "from rest_auth.registration.views import SocialLoginView from rest_auth.social_serializers import TwitterLoginSerializer User = get_user_model() class UserDetailView(LoginRequiredMixin,", "= GoogleOAuth2Adapter class InstagramLogin(SocialLoginView): adapter_class = InstagramOAuth2Adapter class TwitterLogin(SocialLoginView): serializer_class = TwitterLoginSerializer adapter_class", "InstagramOAuth2Adapter from 
allauth.socialaccount.providers.twitter.views import TwitterOAuthAdapter from rest_auth.registration.views import SocialLoginView from rest_auth.social_serializers import TwitterLoginSerializer", "import get_user_model from django.contrib.auth.mixins import LoginRequiredMixin from django.urls import reverse from django.views.generic import", "ListView, RedirectView, UpdateView from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter from allauth.socialaccount.providers.google.views", "UserUpdateView(LoginRequiredMixin, UpdateView): model = User fields = [\"name\"] def get_success_url(self): return reverse(\"users:detail\", kwargs={\"username\":", "UserRedirectView.as_view() # Social Apps class FacebookLogin(SocialLoginView): adapter_class = FacebookOAuth2Adapter class GitHubLogin(SocialLoginView): adapter_class =", "slug_field = \"username\" slug_url_kwarg = \"username\" user_detail_view = UserDetailView.as_view() class UserListView(LoginRequiredMixin, ListView): model", "\"username\" user_detail_view = UserDetailView.as_view() class UserListView(LoginRequiredMixin, ListView): model = User slug_field = \"username\"", "= GitHubOAuth2Adapter class GoogleLogin(SocialLoginView): adapter_class = GoogleOAuth2Adapter class InstagramLogin(SocialLoginView): adapter_class = InstagramOAuth2Adapter class", "= UserDetailView.as_view() class UserListView(LoginRequiredMixin, ListView): model = User slug_field = \"username\" slug_url_kwarg =", "False def get_redirect_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) user_redirect_view = UserRedirectView.as_view() # Social Apps", "get_object(self): return User.objects.get(username=self.request.user.username) user_update_view = UserUpdateView.as_view() class UserRedirectView(LoginRequiredMixin, RedirectView): permanent = False def", "class UserListView(LoginRequiredMixin, ListView): model = User slug_field = \"username\" slug_url_kwarg = \"username\" user_list_view", "UserUpdateView.as_view() class UserRedirectView(LoginRequiredMixin, RedirectView): permanent = False def get_redirect_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username})", "= UserRedirectView.as_view() # Social Apps class FacebookLogin(SocialLoginView): adapter_class = FacebookOAuth2Adapter class GitHubLogin(SocialLoginView): adapter_class", "TwitterLoginSerializer User = get_user_model() class UserDetailView(LoginRequiredMixin, DetailView): model = User slug_field = \"username\"", "from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter from allauth.socialaccount.providers.instagram.views import InstagramOAuth2Adapter from allauth.socialaccount.providers.twitter.views import TwitterOAuthAdapter from", "User = get_user_model() class UserDetailView(LoginRequiredMixin, DetailView): model = User slug_field = \"username\" slug_url_kwarg", "= \"username\" user_list_view = UserListView.as_view() class UserUpdateView(LoginRequiredMixin, UpdateView): model = User fields =", "\"username\" slug_url_kwarg = \"username\" user_list_view = UserListView.as_view() class UserUpdateView(LoginRequiredMixin, UpdateView): model = User", "get_user_model from django.contrib.auth.mixins import LoginRequiredMixin from django.urls import reverse from django.views.generic import DetailView,", "model = User slug_field = \"username\" slug_url_kwarg = \"username\" user_detail_view = UserDetailView.as_view() class", 
"= False def get_redirect_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) user_redirect_view = UserRedirectView.as_view() # Social", "import InstagramOAuth2Adapter from allauth.socialaccount.providers.twitter.views import TwitterOAuthAdapter from rest_auth.registration.views import SocialLoginView from rest_auth.social_serializers import", "allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter from allauth.socialaccount.providers.instagram.views import InstagramOAuth2Adapter from allauth.socialaccount.providers.twitter.views import TwitterOAuthAdapter from rest_auth.registration.views", "\"username\" slug_url_kwarg = \"username\" user_detail_view = UserDetailView.as_view() class UserListView(LoginRequiredMixin, ListView): model = User", "adapter_class = GitHubOAuth2Adapter class GoogleLogin(SocialLoginView): adapter_class = GoogleOAuth2Adapter class InstagramLogin(SocialLoginView): adapter_class = InstagramOAuth2Adapter", "Apps class FacebookLogin(SocialLoginView): adapter_class = FacebookOAuth2Adapter class GitHubLogin(SocialLoginView): adapter_class = GitHubOAuth2Adapter class GoogleLogin(SocialLoginView):", "reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) user_redirect_view = UserRedirectView.as_view() # Social Apps class FacebookLogin(SocialLoginView): adapter_class =", "django.contrib.auth.mixins import LoginRequiredMixin from django.urls import reverse from django.views.generic import DetailView, ListView, RedirectView,", "allauth.socialaccount.providers.instagram.views import InstagramOAuth2Adapter from allauth.socialaccount.providers.twitter.views import TwitterOAuthAdapter from rest_auth.registration.views import SocialLoginView from rest_auth.social_serializers", "reverse from django.views.generic import DetailView, ListView, RedirectView, UpdateView from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter from", "User slug_field = \"username\" slug_url_kwarg = \"username\" user_detail_view = UserDetailView.as_view() class UserListView(LoginRequiredMixin, ListView):", "get_user_model() class UserDetailView(LoginRequiredMixin, DetailView): model = User slug_field = \"username\" slug_url_kwarg = \"username\"", "rest_auth.social_serializers import TwitterLoginSerializer User = get_user_model() class UserDetailView(LoginRequiredMixin, DetailView): model = User slug_field", "= \"username\" slug_url_kwarg = \"username\" user_list_view = UserListView.as_view() class UserUpdateView(LoginRequiredMixin, UpdateView): model =", "import reverse from django.views.generic import DetailView, ListView, RedirectView, UpdateView from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter", "import TwitterOAuthAdapter from rest_auth.registration.views import SocialLoginView from rest_auth.social_serializers import TwitterLoginSerializer User = get_user_model()", "= \"username\" user_detail_view = UserDetailView.as_view() class UserListView(LoginRequiredMixin, ListView): model = User slug_field =", "class FacebookLogin(SocialLoginView): adapter_class = FacebookOAuth2Adapter class GitHubLogin(SocialLoginView): adapter_class = GitHubOAuth2Adapter class GoogleLogin(SocialLoginView): adapter_class", "= User fields = [\"name\"] def get_success_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) def get_object(self):", "= \"username\" slug_url_kwarg = \"username\" user_detail_view = UserDetailView.as_view() class 
UserListView(LoginRequiredMixin, ListView): model =", "slug_url_kwarg = \"username\" user_list_view = UserListView.as_view() class UserUpdateView(LoginRequiredMixin, UpdateView): model = User fields", "def get_object(self): return User.objects.get(username=self.request.user.username) user_update_view = UserUpdateView.as_view() class UserRedirectView(LoginRequiredMixin, RedirectView): permanent = False", "return User.objects.get(username=self.request.user.username) user_update_view = UserUpdateView.as_view() class UserRedirectView(LoginRequiredMixin, RedirectView): permanent = False def get_redirect_url(self):", "import DetailView, ListView, RedirectView, UpdateView from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter", "slug_url_kwarg = \"username\" user_detail_view = UserDetailView.as_view() class UserListView(LoginRequiredMixin, ListView): model = User slug_field", "class GitHubLogin(SocialLoginView): adapter_class = GitHubOAuth2Adapter class GoogleLogin(SocialLoginView): adapter_class = GoogleOAuth2Adapter class InstagramLogin(SocialLoginView): adapter_class", "\"username\" user_list_view = UserListView.as_view() class UserUpdateView(LoginRequiredMixin, UpdateView): model = User fields = [\"name\"]", "= UserUpdateView.as_view() class UserRedirectView(LoginRequiredMixin, RedirectView): permanent = False def get_redirect_url(self): return reverse(\"users:detail\", kwargs={\"username\":", "get_success_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) def get_object(self): return User.objects.get(username=self.request.user.username) user_update_view = UserUpdateView.as_view() class", "= get_user_model() class UserDetailView(LoginRequiredMixin, DetailView): model = User slug_field = \"username\" slug_url_kwarg =", "rest_auth.registration.views import SocialLoginView from rest_auth.social_serializers import TwitterLoginSerializer User = get_user_model() class UserDetailView(LoginRequiredMixin, DetailView):", "UserRedirectView(LoginRequiredMixin, RedirectView): permanent = False def get_redirect_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) user_redirect_view =", "user_redirect_view = UserRedirectView.as_view() # Social Apps class FacebookLogin(SocialLoginView): adapter_class = FacebookOAuth2Adapter class GitHubLogin(SocialLoginView):", "self.request.user.username}) user_redirect_view = UserRedirectView.as_view() # Social Apps class FacebookLogin(SocialLoginView): adapter_class = FacebookOAuth2Adapter class", "UserListView(LoginRequiredMixin, ListView): model = User slug_field = \"username\" slug_url_kwarg = \"username\" user_list_view =", "kwargs={\"username\": self.request.user.username}) def get_object(self): return User.objects.get(username=self.request.user.username) user_update_view = UserUpdateView.as_view() class UserRedirectView(LoginRequiredMixin, RedirectView): permanent", "allauth.socialaccount.providers.twitter.views import TwitterOAuthAdapter from rest_auth.registration.views import SocialLoginView from rest_auth.social_serializers import TwitterLoginSerializer User =", "GoogleLogin(SocialLoginView): adapter_class = GoogleOAuth2Adapter class InstagramLogin(SocialLoginView): adapter_class = InstagramOAuth2Adapter class TwitterLogin(SocialLoginView): serializer_class =", "import TwitterLoginSerializer User = get_user_model() class UserDetailView(LoginRequiredMixin, DetailView): model = 
User slug_field =", "= [\"name\"] def get_success_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) def get_object(self): return User.objects.get(username=self.request.user.username) user_update_view", "[\"name\"] def get_success_url(self): return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username}) def get_object(self): return User.objects.get(username=self.request.user.username) user_update_view =" ]
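The reverse("users:detail", ...) calls above only resolve if the project registers these views under a "users" URL namespace with a detail route keyed by username. A plausible wiring sketch follows; the module path, app_name, and route shapes are assumptions, not taken from the source.

# users/urls.py (hypothetical)
from django.urls import path

from .views import (
    user_detail_view,
    user_list_view,
    user_redirect_view,
    user_update_view,
)

app_name = "users"
urlpatterns = [
    path("", view=user_list_view, name="list"),
    path("~redirect/", view=user_redirect_view, name="redirect"),
    path("~update/", view=user_update_view, name="update"),
    path("<str:username>/", view=user_detail_view, name="detail"),
]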
[ "= UTIL.asIntPtr() origin = OpenMaya.MPoint(pointPosition) normal = OpenMaya.MVector() targetFnMesh.getClosestNormal( origin, normal, self.SPACE, ptr_int)", "+ 1.0) TV *= x NV *= y BV *= z except TypeError:", "intersectPoint = OpenMaya.MFloatPoint( result = fnMesh.closestIntersection( OpenMaya.MFloatPoint( point_in_3d.x, point_in_3d.y, point_in_3d.z), OpenMaya.MFloatVector(vector_in_3d), faceIDs, triIDs,", "for i in cmds.xform(self.DUPLICATED, q=True, ws=True, rp=True)] cmds.setAttr(self.DUPLICATED + \".translate\", *location) # Can't", "self.MOD_POINT[0], self.MOD_POINT[1]) else: point_in_3d, vector_in_3d = convertTo3D(x, y) length = 1.0 degree =", "ilf=self.InstanceFlag)[0] def dragEvent(self): \"\"\" Event while dragging a 3d view \"\"\" if self.TARGET_FNMESH", "self.TARGET_FNMESH, self.SCALE_ORIG, self.MATRIX_ORIG) if transformMatrix is None: return # Create new object to", "result = fnMesh.closestIntersection( OpenMaya.MFloatPoint( point_in_3d.x, point_in_3d.y, point_in_3d.z), OpenMaya.MFloatVector(vector_in_3d), faceIDs, triIDs, idSorted, self.SPACE, maxParamPtr,", "import cmds try: from PySide.QtGui import QApplication from PySide import QtCore except ImportError:", "QtCore except ImportError: from PySide2.QtWidgets import QApplication from PySide2 import QtCore import math", "Returns: list : 16 values for matrixs \"\"\" # Position of new object", "syntax = OpenMaya.MSyntax() syntax.addArg(OpenMaya.MSyntax.kString) syntax.addFlag( kDummyFlag, kDummyFlagLong, OpenMaya.MSyntax.kBoolean) syntax.addFlag( kRotationFlag, kRotationFlagLong, OpenMaya.MSyntax.kBoolean) syntax.addFlag(", "= NV.z * math.sin(rad / 2) q4 = math.cos(rad / 2) TV =", "False: NV = OpenMaya.MVector( matrix_orig[4], matrix_orig[5], matrix_orig[6]) NV.normalize() TV = OpenMaya.MVector( matrix_orig[0], matrix_orig[1],", "< 0: degree = -degree return cathetus, degree except ZeroDivisionError: return None, None", "DuplicateOverSurface(OpenMayaMPx.MPxCommand): def __init__(self): super(DuplicateOverSurface, self).__init__() self.ANCHOR_POINT = None self.DUPLICATED = None self.SOURCE =", "command: %s\\n\" % kPluginCmdName) raise def uninitializePlugin(mObject): mPlugin = OpenMayaMPx.MFnPlugin(mObject) try: mPlugin.deregisterCommand(kPluginCmdName) except:", "cmdCreator(): return OpenMayaMPx.asMPxPtr(DuplicateOverSurface()) def initializePlugin(mObject): mPlugin = OpenMayaMPx.MFnPlugin(mObject, \"<NAME>\") try: mPlugin.registerCommand(kPluginCmdName, cmdCreator) mPlugin.setVersion(\"0.10\")", "getDagPathFromScreen(x, y) # If draggin outside of objects if targetDagPath is None: return", "\"-rotation\" kDummyFlag = \"-d\" kDummyFlagLong = \"-dummy\" kInstanceFlag = \"-ilf\" kInstanceFlagLong = \"-instanceLeaf\"", "= \"-ilf\" kInstanceFlagLong = \"-instanceLeaf\" # Syntax creator def syntaxCreator(): syntax = OpenMaya.MSyntax()", "\"duplicateOverSurfaceDragger\" UTIL = OpenMaya.MScriptUtil() kPluginCmdName = \"duplicateOverSurface\" kRotationFlag = \"-r\" kRotationFlagLong = \"-rotation\"", "try: from PySide.QtGui import QApplication from PySide import QtCore except ImportError: from PySide2.QtWidgets", "= x end_y = y cathetus = end_x - start_x opposite = end_y", "of a face. Args: pointPosition (OpenMaya.MFloatPoint) targetFnMesh (OpenMaya.MFnMesh) Returns: OpenMaya.MVector : tangent vector", "OpenMaya.MPoint \"\"\" vertexIndexArray = OpenMaya.MIntArray() fnMesh.getPolygonVertices(faceID, vertexIndexArray) basePoint = OpenMaya.MPoint(point_orig) closestPoint = OpenMaya.MPoint()", "Parse the arguments. 
argData = OpenMaya.MArgDatabase(syntaxCreator(), args) self.SOURCE = argData.commandArgumentString(0) if argData.isFlagSet(kRotationFlag) is", "= None accelParam = None hitRayParam = None hitTriangle = None hitBary1 =", "OpenMaya.MFloatPoint( point_in_3d.x, point_in_3d.y, point_in_3d.z), OpenMaya.MFloatVector(vector_in_3d), faceIDs, triIDs, idSorted, self.SPACE, maxParamPtr, testBothDirections, accelParam, hitPoint,", "= OpenMaya.MFloatPoint() hitFacePtr = UTIL.asIntPtr() idSorted = False testBothDirections = False faceIDs =", "force=True) def releaseEvent(self): self.MOD_FIRST = True def getDragInfo(self, x, y): \"\"\" Get distance", "idSorted = False testBothDirections = False faceIDs = None triIDs = None accelParam", "finally: matrix = [ TV.x, TV.y, TV.z, 0, NV.x, NV.y, NV.z, 0, BV.x,", "self.getNewObject() # Reset transform of current object cmds.setAttr(self.DUPLICATED + \".translate\", *[0, 0, 0])", "ws=True, rp=True)] cmds.setAttr(self.DUPLICATED + \".translate\", *location) # Can't apply freeze to instances if", "long as you retain this # notice you can do whatever you want", "numOfVtx z = sum([tangentArray[i].z for i in range(numOfVtx)]) / numOfVtx tangentVector = OpenMaya.MVector()", "from screen OpenMaya.MGlobal.selectFromScreen( int(x), int(y), OpenMaya.MGlobal.kReplaceList, OpenMaya.MGlobal.kSurfaceSelectMethod) # Get dagpath, or return None", "and vector of clicked point in 3d space. Args: screen_x (int) screen_y (int)", "y tangentVector.z = z tangentVector.normalize() return tangentVector def getNormal(self, pointPosition, targetFnMesh): \"\"\" Return", "start_x = self.MOD_POINT[0] start_y = self.MOD_POINT[1] end_x = x end_y = y cathetus", "scale_orig[1] * (scale_plus / 100 + 1.0) z = scale_orig[2] * (scale_plus /", "normal, self.SPACE, ptr_int) normal.normalize() return normal # Creator def cmdCreator(): return OpenMayaMPx.asMPxPtr(DuplicateOverSurface()) def", "try: x = scale_orig[0] * (scale_plus / 100 + 1.0) y = scale_orig[1]", "is worth it, you can buy me a beer in return. # -<NAME>", "(OpenMaya.MFnMesh) Returns: closestPoint : OpenMaya.MPoint \"\"\" vertexIndexArray = OpenMaya.MIntArray() fnMesh.getPolygonVertices(faceID, vertexIndexArray) basePoint =", "q4 = math.cos(rad / 2) TV = TV.rotateBy(q1, q2, q3, q4) except TypeError:", "pressEvent(self): button = cmds.draggerContext(DRAGGER, query=True, button=True) # Leave the tool by middle click", "None: return # Apply new transform cmds.xform(self.DUPLICATED, matrix=transformMatrix) cmds.setAttr(self.DUPLICATED + \".shear\", *[0, 0,", "from maya import OpenMayaMPx from maya import cmds try: from PySide.QtGui import QApplication", "None else: tempSel.getDagPath(0, dagpath) return dagpath def getClosestVertex(point_orig, faceID, fnMesh): \"\"\" Args: point_orig", "is True: self.InstanceFlag = argData.flagArgumentBool(kInstanceFlag, 0) cmds.setToolTo(self.setupDragger()) def setupDragger(self): \"\"\" Setup dragger context", "| self.SHIFT): OP = getClosestVertex(OP, faceID, targetFnMesh) # Get normal vector and tangent", "None accelParam = None hitRayParam = None hitTriangle = None hitBary1 = None", "x, y): \"\"\" Get distance and angle in screen space. 
\"\"\" start_x =", "return point_in_3d, vector_in_3d def getDagPathFromScreen(x, y): \"\"\" Args: x (int or float) y", "getClosestVertex(point_orig, faceID, fnMesh): \"\"\" Args: point_orig (OpenMaya.MFloatPoint) faceID (int) fnMesh (OpenMaya.MFnMesh) Returns: closestPoint", "\"\"\" hitPoint = OpenMaya.MFloatPoint() hitFacePtr = UTIL.asIntPtr() idSorted = False testBothDirections = False", "except: sys.stderr.write(\"Failed to unregister command: %s\\n\" % kPluginCmdName) def convertTo3D(screen_x, screen_y): \"\"\" Return", "maya import cmds try: from PySide.QtGui import QApplication from PySide import QtCore except", "def uninitializePlugin(mObject): mPlugin = OpenMayaMPx.MFnPlugin(mObject) try: mPlugin.deregisterCommand(kPluginCmdName) except: sys.stderr.write(\"Failed to unregister command: %s\\n\"", "i in range(numOfVtx)]) / numOfVtx z = sum([tangentArray[i].z for i in range(numOfVtx)]) /", "self.MOD_POINT[1]) else: point_in_3d, vector_in_3d = convertTo3D(x, y) length = 1.0 degree = 0.0", "retain this # notice you can do whatever you want with this stuff.", "is None: return dragPosition = cmds.draggerContext( DRAGGER, query=True, dragPoint=True) x = dragPosition[0] y", "unregister command: %s\\n\" % kPluginCmdName) def convertTo3D(screen_x, screen_y): \"\"\" Return point and vector", "def getNormal(self, pointPosition, targetFnMesh): \"\"\" Return a normal vector of a face. Args:", "in 3d space. Args: screen_x (int) screen_y (int) Returns: OpenMaya.MPoint : point_in_3d OpenMaya.MVector", ": tangent vector \"\"\" tangentArray = OpenMaya.MFloatVectorArray() targetFnMesh.getFaceVertexTangents( faceID, tangentArray, self.SPACE) numOfVtx =", ": tangent vector int : faceID \"\"\" ptr_int = UTIL.asIntPtr() origin = OpenMaya.MPoint(pointPosition)", "(OpenMaya.MFloatPoint) faceID (int) fnMesh (OpenMaya.MFnMesh) Returns: closestPoint : OpenMaya.MPoint \"\"\" vertexIndexArray = OpenMaya.MIntArray()", "Args: point_in_3d (OpenMaya.MPoint) vector_in_3d (OpenMaya.mVector) Returns: OpenMaya.MFloatPoint : hitPoint \"\"\" hitPoint = OpenMaya.MFloatPoint()", "Event while dragging a 3d view \"\"\" if self.TARGET_FNMESH is None: return dragPosition", "q=True, matrix=True) self.TARGET_FNMESH = OpenMaya.MFnMesh(targetDagPath) transformMatrix = self.getMatrix( point_in_3d, vector_in_3d, self.TARGET_FNMESH, self.SCALE_ORIG, self.MATRIX_ORIG)", "OP.z, 1 ] return matrix def getTangent(self, faceID, targetFnMesh): \"\"\" Return a tangent", "in return. # -<NAME> # ---------------------------------------------------------------------------- # from maya import OpenMaya from maya", "self.TARGET_FNMESH, self.SCALE_ORIG, self.MATRIX_ORIG, length, degree ) if transformMatrix is None: return # Apply", "def getMatrix(self, mPoint, mVector, targetFnMesh, scale_orig, matrix_orig, scale_plus=1, degree_plus=0.0): \"\"\" Return a list", "# notice you can do whatever you want with this stuff. 
If we", "False length, degree = self.getDragInfo(x, y) if qtModifier == self.CTRL: length = 1.0", "if qtModifier == self.CTRL: length = 1.0 if qtModifier == self.SHIFT: degree =", "tangentVector.z = z tangentVector.normalize() return tangentVector def getNormal(self, pointPosition, targetFnMesh): \"\"\" Return a", "for i in range(numOfVtx)]) / numOfVtx y = sum([tangentArray[i].y for i in range(numOfVtx)])", "= \"-rotation\" kDummyFlag = \"-d\" kDummyFlagLong = \"-dummy\" kInstanceFlag = \"-ilf\" kInstanceFlagLong =", "(OpenMaya.MFloatPoint) targetFnMesh (OpenMaya.MFnMesh) Returns: OpenMaya.MVector : tangent vector int : faceID \"\"\" ptr_int", "point_in_3d, vector_in_3d def getDagPathFromScreen(x, y): \"\"\" Args: x (int or float) y (int", "matrix_orig, scale_plus=1, degree_plus=0.0): \"\"\" Return a list of values which consist a new", "= 0.0 # Get new transform matrix for new object transformMatrix = self.getMatrix(", "DRAGGER, query=True, dragPoint=True) x = dragPosition[0] y = dragPosition[1] modifier = cmds.draggerContext( DRAGGER,", "numOfVtx y = sum([tangentArray[i].y for i in range(numOfVtx)]) / numOfVtx z = sum([tangentArray[i].z", "2) q4 = math.cos(rad / 2) TV = TV.rotateBy(q1, q2, q3, q4) except", "point_in_3d OpenMaya.MVector : vector_in_3d \"\"\" point_in_3d = OpenMaya.MPoint() vector_in_3d = OpenMaya.MVector() OpenMayaUI.M3dView.active3dView().viewToWorld( int(screen_x),", "# Get new transform matrix for new object transformMatrix = self.getMatrix( point_in_3d, vector_in_3d,", "1.0 degree = 0.0 # Get new transform matrix for new object transformMatrix", "(OpenMaya.MVector) Returns: OpenMaya.MVector : tangent vector \"\"\" tangentArray = OpenMaya.MFloatVectorArray() targetFnMesh.getFaceVertexTangents( faceID, tangentArray,", "None hitRayParam = None hitTriangle = None hitBary1 = None hitBary2 = None", "range(numOfVtx)]) / numOfVtx y = sum([tangentArray[i].y for i in range(numOfVtx)]) / numOfVtx z", "return normal # Creator def cmdCreator(): return OpenMayaMPx.asMPxPtr(DuplicateOverSurface()) def initializePlugin(mObject): mPlugin = OpenMayaMPx.MFnPlugin(mObject,", "math.sin(rad / 2) q3 = NV.z * math.sin(rad / 2) q4 = math.cos(rad", "except ImportError: from PySide2.QtWidgets import QApplication from PySide2 import QtCore import math import", "= OpenMaya.MFloatPoint( result = fnMesh.closestIntersection( OpenMaya.MFloatPoint( point_in_3d.x, point_in_3d.y, point_in_3d.z), OpenMaya.MFloatVector(vector_in_3d), faceIDs, triIDs, idSorted,", "= z tangentVector.normalize() return tangentVector def getNormal(self, pointPosition, targetFnMesh): \"\"\" Return a normal", "syntax.addFlag( kDummyFlag, kDummyFlagLong, OpenMaya.MSyntax.kBoolean) syntax.addFlag( kRotationFlag, kRotationFlagLong, OpenMaya.MSyntax.kBoolean) syntax.addFlag( kInstanceFlag, kInstanceFlagLong, OpenMaya.MSyntax.kBoolean) return", "= None self.MOD_FIRST = None self.MOD_POINT = None self.SPACE = OpenMaya.MSpace.kWorld self.ROTATION =", "rad = math.radians(degree_plus) q1 = NV.x * math.sin(rad / 2) q2 = NV.y", "self.SPACE, maxParamPtr, testBothDirections, accelParam, hitPoint, hitRayParam, hitFacePtr, hitTriangle, hitBary1, hitBary2) faceID = UTIL.getInt(hitFacePtr)", "MOD_FIRST self.MOD_FIRST = False length, degree = self.getDragInfo(x, y) if qtModifier == self.CTRL:", "face. Args: faceID (int) mVector (OpenMaya.MVector) Returns: OpenMaya.MVector : tangent vector \"\"\" tangentArray", "clicked point in 3d space. 
Args: screen_x (int) screen_y (int) Returns: OpenMaya.MPoint :", "y = pressPosition[1] self.ANCHOR_POINT = [x, y] # Convert point_in_3d, vector_in_3d = convertTo3D(x,", "vector_in_3d \"\"\" point_in_3d = OpenMaya.MPoint() vector_in_3d = OpenMaya.MVector() OpenMayaUI.M3dView.active3dView().viewToWorld( int(screen_x), int(screen_y), point_in_3d, vector_in_3d)", "hitBary2 = None maxParamPtr = 99999 # intersectPoint = OpenMaya.MFloatPoint( result = fnMesh.closestIntersection(", "origianl scale information self.SCALE_ORIG = cmds.getAttr(self.SOURCE + \".scale\")[0] self.MATRIX_ORIG = cmds.xform(self.SOURCE, q=True, matrix=True)", "self.ANCHOR_POINT = [x, y] # Convert point_in_3d, vector_in_3d = convertTo3D(x, y) # Get", "0: degree = -degree return cathetus, degree except ZeroDivisionError: return None, None def", "self.TARGET_FNMESH is None: return dragPosition = cmds.draggerContext( DRAGGER, query=True, dragPoint=True) x = dragPosition[0]", "DRAGGER, query=True, modifier=True) if modifier == \"none\": self.MOD_FIRST = True qtModifier = QApplication.keyboardModifiers()", "the arguments. argData = OpenMaya.MArgDatabase(syntaxCreator(), args) self.SOURCE = argData.commandArgumentString(0) if argData.isFlagSet(kRotationFlag) is True:", "vertexIndexArray) basePoint = OpenMaya.MPoint(point_orig) closestPoint = OpenMaya.MPoint() length = 99999.0 for index in", "= OpenMaya.MSyntax() syntax.addArg(OpenMaya.MSyntax.kString) syntax.addFlag( kDummyFlag, kDummyFlagLong, OpenMaya.MSyntax.kBoolean) syntax.addFlag( kRotationFlag, kRotationFlagLong, OpenMaya.MSyntax.kBoolean) syntax.addFlag( kInstanceFlag,", "= point - basePoint if lengthVector.length() < length: length = lengthVector.length() closestPoint =", "self.TARGET_FNMESH = None self.MOD_FIRST = None self.MOD_POINT = None self.SPACE = OpenMaya.MSpace.kWorld self.ROTATION", "PySide import QtCore except ImportError: from PySide2.QtWidgets import QApplication from PySide2 import QtCore", "kInstanceFlag = \"-ilf\" kInstanceFlagLong = \"-instanceLeaf\" # Syntax creator def syntaxCreator(): syntax =", "basePoint if lengthVector.length() < length: length = lengthVector.length() closestPoint = point return closestPoint", "in cmds.xform(self.DUPLICATED, q=True, ws=True, rp=True)] cmds.setAttr(self.DUPLICATED + \".translate\", *location) # Can't apply freeze", "= self.getIntersection(mPoint, mVector, targetFnMesh) # If it doesn't intersect to any geometries, return", "kPluginCmdName = \"duplicateOverSurface\" kRotationFlag = \"-r\" kRotationFlagLong = \"-rotation\" kDummyFlag = \"-d\" kDummyFlagLong", "True: self.InstanceFlag = argData.flagArgumentBool(kInstanceFlag, 0) cmds.setToolTo(self.setupDragger()) def setupDragger(self): \"\"\" Setup dragger context command", "point_in_3d, vector_in_3d = convertTo3D(x, y) # Get MFnMesh of snap target targetDagPath =", "0, OP.x, OP.y, OP.z, 1 ] return matrix def getTangent(self, faceID, targetFnMesh): \"\"\"", "ap=True) x = pressPosition[0] y = pressPosition[1] self.ANCHOR_POINT = [x, y] # Convert", ": 16 values for matrixs \"\"\" # Position of new object OP, faceID", "False faceIDs = None triIDs = None accelParam = None hitRayParam = None", "Args: x (int or float) y (int or float) Returns: dagpath : OpenMaya.MDagPath", "want with this stuff. 
If we meet some day, # and you think", "self.InstanceFlag = False self.SHIFT = QtCore.Qt.ShiftModifier self.CTRL = QtCore.Qt.ControlModifier def doIt(self, args): #", "== self.CTRL or qtModifier == self.SHIFT: # If this is the first click", "return dragPosition = cmds.draggerContext( DRAGGER, query=True, dragPoint=True) x = dragPosition[0] y = dragPosition[1]", "OpenMaya.MDagPath() if tempSel.length() == 0: return None else: tempSel.getDagPath(0, dagpath) return dagpath def", "point in viewport screen space pressPosition = cmds.draggerContext(DRAGGER, query=True, ap=True) x = pressPosition[0]", "= None self.TARGET_FNMESH = None self.MOD_FIRST = None self.MOD_POINT = None self.SPACE =", "# # ---------------------------------------------------------------------------- # \"THE BEER-WARE LICENSE\" (Revision 42): # <<EMAIL>> wrote this", "math.sin(rad / 2) q4 = math.cos(rad / 2) TV = TV.rotateBy(q1, q2, q3,", "(OpenMaya.MPoint) mVector (OpenMaya.MVector) Returns: list : 16 values for matrixs \"\"\" # Position", "fnMesh): \"\"\" Args: point_orig (OpenMaya.MFloatPoint) faceID (int) fnMesh (OpenMaya.MFnMesh) Returns: closestPoint : OpenMaya.MPoint", "def doIt(self, args): # Parse the arguments. argData = OpenMaya.MArgDatabase(syntaxCreator(), args) self.SOURCE =", "degree_plus=0.0): \"\"\" Return a list of values which consist a new transform matrix.", "OpenMayaUI.M3dView.active3dView().viewToWorld( int(screen_x), int(screen_y), point_in_3d, vector_in_3d) return point_in_3d, vector_in_3d def getDagPathFromScreen(x, y): \"\"\" Args:", "faceID, tangentArray, self.SPACE) numOfVtx = tangentArray.length() x = sum([tangentArray[i].x for i in range(numOfVtx)])", "rp=True)] cmds.setAttr(self.DUPLICATED + \".translate\", *location) # Can't apply freeze to instances if self.InstanceFlag", "return # Apply new transform cmds.xform(self.DUPLICATED, matrix=transformMatrix) cmds.setAttr(self.DUPLICATED + \".shear\", *[0, 0, 0])", "tempSel.getDagPath(0, dagpath) return dagpath def getClosestVertex(point_orig, faceID, fnMesh): \"\"\" Args: point_orig (OpenMaya.MFloatPoint) faceID", "# from maya import OpenMaya from maya import OpenMayaUI from maya import OpenMayaMPx", "q4) except TypeError: pass # Bitangent vector BV = TV ^ NV BV.normalize()", "self.MOD_FIRST = None self.MOD_POINT = None self.SPACE = OpenMaya.MSpace.kWorld self.ROTATION = True self.InstanceFlag", "TV = TV.rotateBy(q1, q2, q3, q4) except TypeError: pass # Bitangent vector BV", "maya import OpenMayaUI from maya import OpenMayaMPx from maya import cmds try: from", "to unregister command: %s\\n\" % kPluginCmdName) def convertTo3D(screen_x, screen_y): \"\"\" Return point and", "cmds.duplicate(self.SOURCE, ilf=self.InstanceFlag)[0] def dragEvent(self): \"\"\" Event while dragging a 3d view \"\"\" if", "sys.stderr.write(\"Failed to unregister command: %s\\n\" % kPluginCmdName) def convertTo3D(screen_x, screen_y): \"\"\" Return point", "0, 0]) cmds.refresh(currentView=True, force=True) def releaseEvent(self): self.MOD_FIRST = True def getDragInfo(self, x, y):", "2) TV = TV.rotateBy(q1, q2, q3, q4) except TypeError: pass # Bitangent vector", "mVector (OpenMaya.MVector) Returns: list : 16 values for matrixs \"\"\" # Position of", "maxParamPtr = 99999 # intersectPoint = OpenMaya.MFloatPoint( result = fnMesh.closestIntersection( OpenMaya.MFloatPoint( point_in_3d.x, point_in_3d.y,", "# Bitangent vector BV = TV ^ NV BV.normalize() # 4x4 Transform Matrix", "faceID, fnMesh): \"\"\" Args: point_orig (OpenMaya.MFloatPoint) faceID (int) fnMesh (OpenMaya.MFnMesh) 
Returns: closestPoint :", "y = scale_orig[1] * (scale_plus / 100 + 1.0) z = scale_orig[2] *", "convertTo3D(x, y) # Get MFnMesh of snap target targetDagPath = getDagPathFromScreen(x, y) #", "is None and faceID is None: return None qtMod = QApplication.keyboardModifiers() if qtMod", "start_y # Get distance using Pythagorean theorem length = math.sqrt( math.pow(cathetus, 2) +", "of intersection.. Args: point_in_3d (OpenMaya.MPoint) vector_in_3d (OpenMaya.mVector) Returns: OpenMaya.MFloatPoint : hitPoint \"\"\" hitPoint", "cmds.getAttr(self.SOURCE + \".scale\")[0] self.MATRIX_ORIG = cmds.xform(self.SOURCE, q=True, matrix=True) self.TARGET_FNMESH = OpenMaya.MFnMesh(targetDagPath) transformMatrix =", "If this is the first click of dragging if self.MOD_FIRST is True: self.MOD_POINT", "BV.y, BV.z, 0, OP.x, OP.y, OP.z, 1 ] return matrix def getTangent(self, faceID,", "faceID (int) mVector (OpenMaya.MVector) Returns: OpenMaya.MVector : tangent vector \"\"\" tangentArray = OpenMaya.MFloatVectorArray()", "ptr_int) normal.normalize() return normal # Creator def cmdCreator(): return OpenMayaMPx.asMPxPtr(DuplicateOverSurface()) def initializePlugin(mObject): mPlugin", "*[0, 0, 0]) cmds.refresh(currentView=True, force=True) def releaseEvent(self): self.MOD_FIRST = True def getDragInfo(self, x,", "is None: return # Get origianl scale information self.SCALE_ORIG = cmds.getAttr(self.SOURCE + \".scale\")[0]", "length degree = math.degrees(math.acos(theta)) if opposite < 0: degree = -degree return cathetus,", "object to snap self.DUPLICATED = self.getNewObject() # Reset transform of current object cmds.setAttr(self.DUPLICATED", "\"\"\" try: cmds.deleteUI(DRAGGER) except: pass dragger = cmds.draggerContext( DRAGGER, pressCommand=self.pressEvent, dragCommand=self.dragEvent, releaseCommand=self.releaseEvent, space='screen',", "new transform matrix for new object transformMatrix = self.getMatrix( point_in_3d, vector_in_3d, self.TARGET_FNMESH, self.SCALE_ORIG,", "is None: return # Apply new transform cmds.xform(self.DUPLICATED, matrix=transformMatrix) cmds.setAttr(self.DUPLICATED + \".shear\", *[0,", "hitPoint, faceID else: return None, None def getMatrix(self, mPoint, mVector, targetFnMesh, scale_orig, matrix_orig,", "return None else: tempSel.getDagPath(0, dagpath) return dagpath def getClosestVertex(point_orig, faceID, fnMesh): \"\"\" Args:", "scale_orig[2] * (scale_plus / 100 + 1.0) TV *= x NV *= y", "this stuff. If we meet some day, # and you think this stuff", "register command: %s\\n\" % kPluginCmdName) raise def uninitializePlugin(mObject): mPlugin = OpenMayaMPx.MFnPlugin(mObject) try: mPlugin.deregisterCommand(kPluginCmdName)", "point_in_3d = OpenMaya.MPoint() vector_in_3d = OpenMaya.MVector() OpenMayaUI.M3dView.active3dView().viewToWorld( int(screen_x), int(screen_y), point_in_3d, vector_in_3d) return point_in_3d,", "and angle in screen space. 
\"\"\" start_x = self.MOD_POINT[0] start_y = self.MOD_POINT[1] end_x", "to register command: %s\\n\" % kPluginCmdName) raise def uninitializePlugin(mObject): mPlugin = OpenMayaMPx.MFnPlugin(mObject) try:", "return # Get origianl scale information self.SCALE_ORIG = cmds.getAttr(self.SOURCE + \".scale\")[0] self.MATRIX_ORIG =", "def releaseEvent(self): self.MOD_FIRST = True def getDragInfo(self, x, y): \"\"\" Get distance and", "= OpenMaya.MPoint(point_orig) closestPoint = OpenMaya.MPoint() length = 99999.0 for index in vertexIndexArray: point", "return matrix def getTangent(self, faceID, targetFnMesh): \"\"\" Return a tangent vector of a", "= OpenMaya.MVector() targetFnMesh.getClosestNormal( origin, normal, self.SPACE, ptr_int) normal.normalize() return normal # Creator def", "99999 # intersectPoint = OpenMaya.MFloatPoint( result = fnMesh.closestIntersection( OpenMaya.MFloatPoint( point_in_3d.x, point_in_3d.y, point_in_3d.z), OpenMaya.MFloatVector(vector_in_3d),", "self.ROTATION = argData.flagArgumentBool(kRotationFlag, 0) if argData.isFlagSet(kInstanceFlag) is True: self.InstanceFlag = argData.flagArgumentBool(kInstanceFlag, 0) cmds.setToolTo(self.setupDragger())", "range(numOfVtx)]) / numOfVtx tangentVector = OpenMaya.MVector() tangentVector.x = x tangentVector.y = y tangentVector.z", "# and you think this stuff is worth it, you can buy me", "\"-dummy\" kInstanceFlag = \"-ilf\" kInstanceFlagLong = \"-instanceLeaf\" # Syntax creator def syntaxCreator(): syntax", "+ \".scale\")[0] self.MATRIX_ORIG = cmds.xform(self.SOURCE, q=True, matrix=True) self.TARGET_FNMESH = OpenMaya.MFnMesh(targetDagPath) transformMatrix = self.getMatrix(", "NV.normalize() TV = OpenMaya.MVector( matrix_orig[0], matrix_orig[1], matrix_orig[2]) TV.normalize() else: NV = self.getNormal(OP, targetFnMesh)", "TV = self.getTangent(faceID, targetFnMesh) # Ctrl-hold rotation if qtMod == self.CTRL: try: rad", "a face. Args: pointPosition (OpenMaya.MFloatPoint) targetFnMesh (OpenMaya.MFnMesh) Returns: OpenMaya.MVector : tangent vector int", "None: return dragPosition = cmds.draggerContext( DRAGGER, query=True, dragPoint=True) x = dragPosition[0] y =", "math.pow(opposite, 2)) try: theta = cathetus / length degree = math.degrees(math.acos(theta)) if opposite", "length = math.sqrt( math.pow(cathetus, 2) + math.pow(opposite, 2)) try: theta = cathetus /", "None, None def getMatrix(self, mPoint, mVector, targetFnMesh, scale_orig, matrix_orig, scale_plus=1, degree_plus=0.0): \"\"\" Return", "None hitTriangle = None hitBary1 = None hitBary2 = None maxParamPtr = 99999", "cmds.setToolTo(self.setupDragger()) def setupDragger(self): \"\"\" Setup dragger context command \"\"\" try: cmds.deleteUI(DRAGGER) except: pass", "%s\\n\" % kPluginCmdName) raise def uninitializePlugin(mObject): mPlugin = OpenMayaMPx.MFnPlugin(mObject) try: mPlugin.deregisterCommand(kPluginCmdName) except: sys.stderr.write(\"Failed", "vector_in_3d, self.TARGET_FNMESH, self.SCALE_ORIG, self.MATRIX_ORIG) if transformMatrix is None: return # Create new object", "normal vector of a face. 
Args: pointPosition (OpenMaya.MFloatPoint) targetFnMesh (OpenMaya.MFnMesh) Returns: OpenMaya.MVector :", "0.0 # Convert point_in_3d, vector_in_3d = convertTo3D( self.MOD_POINT[0], self.MOD_POINT[1]) else: point_in_3d, vector_in_3d =", "Returns: dagpath : OpenMaya.MDagPath \"\"\" # Select from screen OpenMaya.MGlobal.selectFromScreen( int(x), int(y), OpenMaya.MGlobal.kReplaceList,", "and faceID is None: return None qtMod = QApplication.keyboardModifiers() if qtMod == (self.CTRL", "super(DuplicateOverSurface, self).__init__() self.ANCHOR_POINT = None self.DUPLICATED = None self.SOURCE = None self.SCALE_ORIG =", "= argData.flagArgumentBool(kRotationFlag, 0) if argData.isFlagSet(kInstanceFlag) is True: self.InstanceFlag = argData.flagArgumentBool(kInstanceFlag, 0) cmds.setToolTo(self.setupDragger()) def", "TV ^ NV BV.normalize() # 4x4 Transform Matrix try: x = scale_orig[0] *", "you want with this stuff. If we meet some day, # and you", "any geometries, return None if OP is None and faceID is None: return", "x end_y = y cathetus = end_x - start_x opposite = end_y -", "beer in return. # -<NAME> # ---------------------------------------------------------------------------- # from maya import OpenMaya from", "OpenMayaMPx.MFnPlugin(mObject, \"<NAME>\") try: mPlugin.registerCommand(kPluginCmdName, cmdCreator) mPlugin.setVersion(\"0.10\") except: sys.stderr.write(\"Failed to register command: %s\\n\" %", "int(screen_y), point_in_3d, vector_in_3d) return point_in_3d, vector_in_3d def getDagPathFromScreen(x, y): \"\"\" Args: x (int", "self.SCALE_ORIG, self.MATRIX_ORIG) if transformMatrix is None: return # Create new object to snap", "if self.InstanceFlag is not True: cmds.makeIdentity(self.DUPLICATED, apply=True, t=True) # Apply transformMatrix to the", "hitBary1 = None hitBary2 = None maxParamPtr = 99999 # intersectPoint = OpenMaya.MFloatPoint(", "cmds.setToolTo('selectSuperContext') return # Get clicked point in viewport screen space pressPosition = cmds.draggerContext(DRAGGER,", "^ NV BV.normalize() # 4x4 Transform Matrix try: x = scale_orig[0] * (scale_plus", "undoMode='step', cursor='hand') return dragger def pressEvent(self): button = cmds.draggerContext(DRAGGER, query=True, button=True) # Leave", "OpenMaya.MSpace.kWorld self.ROTATION = True self.InstanceFlag = False self.SHIFT = QtCore.Qt.ShiftModifier self.CTRL = QtCore.Qt.ControlModifier", "Leave the tool by middle click if button == 2: cmds.setToolTo('selectSuperContext') return #", "= self.MOD_POINT[1] end_x = x end_y = y cathetus = end_x - start_x", "TV.x, TV.y, TV.z, 0, NV.x, NV.y, NV.z, 0, BV.x, BV.y, BV.z, 0, OP.x,", "# Convert point_in_3d, vector_in_3d = convertTo3D( self.MOD_POINT[0], self.MOD_POINT[1]) else: point_in_3d, vector_in_3d = convertTo3D(x,", "dragPosition[1] modifier = cmds.draggerContext( DRAGGER, query=True, modifier=True) if modifier == \"none\": self.MOD_FIRST =", "new transform matrix. Args: mPoint (OpenMaya.MPoint) mVector (OpenMaya.MVector) Returns: list : 16 values", "hitRayParam, hitFacePtr, hitTriangle, hitBary1, hitBary2) faceID = UTIL.getInt(hitFacePtr) if result is True: return", "screen_y): \"\"\" Return point and vector of clicked point in 3d space. 
Args:", "self.getDragInfo(x, y) if qtModifier == self.CTRL: length = 1.0 if qtModifier == self.SHIFT:", "self.getTangent(faceID, targetFnMesh) # Ctrl-hold rotation if qtMod == self.CTRL: try: rad = math.radians(degree_plus)", "fails tempSel = OpenMaya.MSelectionList() OpenMaya.MGlobal.getActiveSelectionList(tempSel) dagpath = OpenMaya.MDagPath() if tempSel.length() == 0: return", "True qtModifier = QApplication.keyboardModifiers() if qtModifier == self.CTRL or qtModifier == self.SHIFT: #", "query=True, dragPoint=True) x = dragPosition[0] y = dragPosition[1] modifier = cmds.draggerContext( DRAGGER, query=True,", "self.MOD_FIRST = True def getDragInfo(self, x, y): \"\"\" Get distance and angle in", "hitPoint, hitRayParam, hitFacePtr, hitTriangle, hitBary1, hitBary2) faceID = UTIL.getInt(hitFacePtr) if result is True:", "Matrix try: x = scale_orig[0] * (scale_plus / 100 + 1.0) y =", "False self.SHIFT = QtCore.Qt.ShiftModifier self.CTRL = QtCore.Qt.ControlModifier def doIt(self, args): # Parse the", "qtModifier == self.CTRL or qtModifier == self.SHIFT: # If this is the first", "else: return None, None def getMatrix(self, mPoint, mVector, targetFnMesh, scale_orig, matrix_orig, scale_plus=1, degree_plus=0.0):", "this is the first click of dragging if self.MOD_FIRST is True: self.MOD_POINT =", "matrix = [ TV.x, TV.y, TV.z, 0, NV.x, NV.y, NV.z, 0, BV.x, BV.y,", "float) y (int or float) Returns: dagpath : OpenMaya.MDagPath \"\"\" # Select from", "normal.normalize() return normal # Creator def cmdCreator(): return OpenMayaMPx.asMPxPtr(DuplicateOverSurface()) def initializePlugin(mObject): mPlugin =", "Get distance using Pythagorean theorem length = math.sqrt( math.pow(cathetus, 2) + math.pow(opposite, 2))", "self.SOURCE = None self.SCALE_ORIG = None self.MATRIX_ORIG = None self.TARGET_FNMESH = None self.MOD_FIRST", "As long as you retain this # notice you can do whatever you", "from PySide import QtCore except ImportError: from PySide2.QtWidgets import QApplication from PySide2 import", "+ 1.0) z = scale_orig[2] * (scale_plus / 100 + 1.0) TV *=", "] return matrix def getTangent(self, faceID, targetFnMesh): \"\"\" Return a tangent vector of", "= False length, degree = self.getDragInfo(x, y) if qtModifier == self.CTRL: length =", "NV = OpenMaya.MVector( matrix_orig[4], matrix_orig[5], matrix_orig[6]) NV.normalize() TV = OpenMaya.MVector( matrix_orig[0], matrix_orig[1], matrix_orig[2])", "return # Create new object to snap self.DUPLICATED = self.getNewObject() # Reset transform", "def getDragInfo(self, x, y): \"\"\" Get distance and angle in screen space. 
\"\"\"", "math.degrees(math.acos(theta)) if opposite < 0: degree = -degree return cathetus, degree except ZeroDivisionError:", "length, degree ) if transformMatrix is None: return # Apply new transform cmds.xform(self.DUPLICATED,", "kInstanceFlagLong, OpenMaya.MSyntax.kBoolean) return syntax class DuplicateOverSurface(OpenMayaMPx.MPxCommand): def __init__(self): super(DuplicateOverSurface, self).__init__() self.ANCHOR_POINT = None", "degree ) if transformMatrix is None: return # Apply new transform cmds.xform(self.DUPLICATED, matrix=transformMatrix)", "point_in_3d, vector_in_3d, self.TARGET_FNMESH, self.SCALE_ORIG, self.MATRIX_ORIG, length, degree ) if transformMatrix is None: return", "= NV.y * math.sin(rad / 2) q3 = NV.z * math.sin(rad / 2)", "TV = OpenMaya.MVector( matrix_orig[0], matrix_orig[1], matrix_orig[2]) TV.normalize() else: NV = self.getNormal(OP, targetFnMesh) TV", "if targetDagPath is None: return # Get origianl scale information self.SCALE_ORIG = cmds.getAttr(self.SOURCE", "= cathetus / length degree = math.degrees(math.acos(theta)) if opposite < 0: degree =", "self.MATRIX_ORIG, length, degree ) if transformMatrix is None: return # Apply new transform", "closestPoint = OpenMaya.MPoint() length = 99999.0 for index in vertexIndexArray: point = OpenMaya.MPoint()", "class DuplicateOverSurface(OpenMayaMPx.MPxCommand): def __init__(self): super(DuplicateOverSurface, self).__init__() self.ANCHOR_POINT = None self.DUPLICATED = None self.SOURCE", "= convertTo3D(x, y) # Get MFnMesh of snap target targetDagPath = getDagPathFromScreen(x, y)", "OP.x, OP.y, OP.z, 1 ] return matrix def getTangent(self, faceID, targetFnMesh): \"\"\" Return", "Args: screen_x (int) screen_y (int) Returns: OpenMaya.MPoint : point_in_3d OpenMaya.MVector : vector_in_3d \"\"\"", "= None self.SPACE = OpenMaya.MSpace.kWorld self.ROTATION = True self.InstanceFlag = False self.SHIFT =", "object cmds.setAttr(self.DUPLICATED + \".translate\", *[0, 0, 0]) location = [-i for i in", "- start_x opposite = end_y - start_y # Get distance using Pythagorean theorem", "idSorted, self.SPACE, maxParamPtr, testBothDirections, accelParam, hitPoint, hitRayParam, hitFacePtr, hitTriangle, hitBary1, hitBary2) faceID =", "import math import sys DRAGGER = \"duplicateOverSurfaceDragger\" UTIL = OpenMaya.MScriptUtil() kPluginCmdName = \"duplicateOverSurface\"", "dragging a 3d view \"\"\" if self.TARGET_FNMESH is None: return dragPosition = cmds.draggerContext(", "# ---------------------------------------------------------------------------- # \"THE BEER-WARE LICENSE\" (Revision 42): # <<EMAIL>> wrote this file.", "self.getMatrix( point_in_3d, vector_in_3d, self.TARGET_FNMESH, self.SCALE_ORIG, self.MATRIX_ORIG) if transformMatrix is None: return # Create", "1.0 if qtModifier == self.SHIFT: degree = 0.0 # Convert point_in_3d, vector_in_3d =", "vector and tangent vector if self.ROTATION is False: NV = OpenMaya.MVector( matrix_orig[4], matrix_orig[5],", "syntaxCreator(): syntax = OpenMaya.MSyntax() syntax.addArg(OpenMaya.MSyntax.kString) syntax.addFlag( kDummyFlag, kDummyFlagLong, OpenMaya.MSyntax.kBoolean) syntax.addFlag( kRotationFlag, kRotationFlagLong, OpenMaya.MSyntax.kBoolean)", "transform matrix. 
Args: mPoint (OpenMaya.MPoint) mVector (OpenMaya.MVector) Returns: list : 16 values for", "= 1.0 if qtModifier == self.SHIFT: degree = 0.0 # Convert point_in_3d, vector_in_3d", "\"<NAME>\") try: mPlugin.registerCommand(kPluginCmdName, cmdCreator) mPlugin.setVersion(\"0.10\") except: sys.stderr.write(\"Failed to register command: %s\\n\" % kPluginCmdName)", "QtCore import math import sys DRAGGER = \"duplicateOverSurfaceDragger\" UTIL = OpenMaya.MScriptUtil() kPluginCmdName =", "return None, None def getIntersection(self, point_in_3d, vector_in_3d, fnMesh): \"\"\" Return a point Position", "self.ROTATION is False: NV = OpenMaya.MVector( matrix_orig[4], matrix_orig[5], matrix_orig[6]) NV.normalize() TV = OpenMaya.MVector(", "kDummyFlag, kDummyFlagLong, OpenMaya.MSyntax.kBoolean) syntax.addFlag( kRotationFlag, kRotationFlagLong, OpenMaya.MSyntax.kBoolean) syntax.addFlag( kInstanceFlag, kInstanceFlagLong, OpenMaya.MSyntax.kBoolean) return syntax", "kDummyFlagLong = \"-dummy\" kInstanceFlag = \"-ilf\" kInstanceFlagLong = \"-instanceLeaf\" # Syntax creator def", "targetFnMesh) TV = self.getTangent(faceID, targetFnMesh) # Ctrl-hold rotation if qtMod == self.CTRL: try:", "(scale_plus / 100 + 1.0) z = scale_orig[2] * (scale_plus / 100 +", "= cmds.draggerContext(DRAGGER, query=True, button=True) # Leave the tool by middle click if button", "sys DRAGGER = \"duplicateOverSurfaceDragger\" UTIL = OpenMaya.MScriptUtil() kPluginCmdName = \"duplicateOverSurface\" kRotationFlag = \"-r\"", "query=True, modifier=True) if modifier == \"none\": self.MOD_FIRST = True qtModifier = QApplication.keyboardModifiers() if", "OpenMaya.MVector( matrix_orig[0], matrix_orig[1], matrix_orig[2]) TV.normalize() else: NV = self.getNormal(OP, targetFnMesh) TV = self.getTangent(faceID,", "this stuff is worth it, you can buy me a beer in return.", "None self.DUPLICATED = None self.SOURCE = None self.SCALE_ORIG = None self.MATRIX_ORIG = None", "def getClosestVertex(point_orig, faceID, fnMesh): \"\"\" Args: point_orig (OpenMaya.MFloatPoint) faceID (int) fnMesh (OpenMaya.MFnMesh) Returns:", "OpenMaya.MFloatPoint : hitPoint \"\"\" hitPoint = OpenMaya.MFloatPoint() hitFacePtr = UTIL.asIntPtr() idSorted = False", "self.MOD_POINT[1] end_x = x end_y = y cathetus = end_x - start_x opposite", "cmds.refresh(currentView=True, force=True) def releaseEvent(self): self.MOD_FIRST = True def getDragInfo(self, x, y): \"\"\" Get", "vector of clicked point in 3d space. 
Args: screen_x (int) screen_y (int) Returns:", "convertTo3D(x, y) length = 1.0 degree = 0.0 # Get new transform matrix", "vector_in_3d) return point_in_3d, vector_in_3d def getDagPathFromScreen(x, y): \"\"\" Args: x (int or float)", "import OpenMaya from maya import OpenMayaUI from maya import OpenMayaMPx from maya import", "OpenMaya.MVector : tangent vector \"\"\" tangentArray = OpenMaya.MFloatVectorArray() targetFnMesh.getFaceVertexTangents( faceID, tangentArray, self.SPACE) numOfVtx", "point_in_3d, vector_in_3d = convertTo3D( self.MOD_POINT[0], self.MOD_POINT[1]) else: point_in_3d, vector_in_3d = convertTo3D(x, y) length", "kDummyFlagLong, OpenMaya.MSyntax.kBoolean) syntax.addFlag( kRotationFlag, kRotationFlagLong, OpenMaya.MSyntax.kBoolean) syntax.addFlag( kInstanceFlag, kInstanceFlagLong, OpenMaya.MSyntax.kBoolean) return syntax class", "apply freeze to instances if self.InstanceFlag is not True: cmds.makeIdentity(self.DUPLICATED, apply=True, t=True) #", "= None self.MATRIX_ORIG = None self.TARGET_FNMESH = None self.MOD_FIRST = None self.MOD_POINT =", "\"\"\" Return point and vector of clicked point in 3d space. Args: screen_x", "self.getIntersection(mPoint, mVector, targetFnMesh) # If it doesn't intersect to any geometries, return None", "def convertTo3D(screen_x, screen_y): \"\"\" Return point and vector of clicked point in 3d", "NV = self.getNormal(OP, targetFnMesh) TV = self.getTangent(faceID, targetFnMesh) # Ctrl-hold rotation if qtMod", "dagpath def getClosestVertex(point_orig, faceID, fnMesh): \"\"\" Args: point_orig (OpenMaya.MFloatPoint) faceID (int) fnMesh (OpenMaya.MFnMesh)", "me a beer in return. # -<NAME> # ---------------------------------------------------------------------------- # from maya import", "object OP, faceID = self.getIntersection(mPoint, mVector, targetFnMesh) # If it doesn't intersect to", "view \"\"\" if self.TARGET_FNMESH is None: return dragPosition = cmds.draggerContext( DRAGGER, query=True, dragPoint=True)", "= math.sqrt( math.pow(cathetus, 2) + math.pow(opposite, 2)) try: theta = cathetus / length", "None self.SOURCE = None self.SCALE_ORIG = None self.MATRIX_ORIG = None self.TARGET_FNMESH = None", "self.DUPLICATED = None self.SOURCE = None self.SCALE_ORIG = None self.MATRIX_ORIG = None self.TARGET_FNMESH", "if result is True: return hitPoint, faceID else: return None, None def getMatrix(self,", "faceIDs = None triIDs = None accelParam = None hitRayParam = None hitTriangle", "if argData.isFlagSet(kRotationFlag) is True: self.ROTATION = argData.flagArgumentBool(kRotationFlag, 0) if argData.isFlagSet(kInstanceFlag) is True: self.InstanceFlag", "click of dragging if self.MOD_FIRST is True: self.MOD_POINT = [x, y] # global", "targetDagPath is None: return # Get origianl scale information self.SCALE_ORIG = cmds.getAttr(self.SOURCE +", "can buy me a beer in return. 
# -<NAME> # ---------------------------------------------------------------------------- # from", "syntax class DuplicateOverSurface(OpenMayaMPx.MPxCommand): def __init__(self): super(DuplicateOverSurface, self).__init__() self.ANCHOR_POINT = None self.DUPLICATED = None", "# Apply transformMatrix to the new object cmds.xform(self.DUPLICATED, matrix=transformMatrix) def getNewObject(self): return cmds.duplicate(self.SOURCE,", "initializePlugin(mObject): mPlugin = OpenMayaMPx.MFnPlugin(mObject, \"<NAME>\") try: mPlugin.registerCommand(kPluginCmdName, cmdCreator) mPlugin.setVersion(\"0.10\") except: sys.stderr.write(\"Failed to register", "== self.SHIFT: # If this is the first click of dragging if self.MOD_FIRST", "a point Position of intersection.. Args: point_in_3d (OpenMaya.MPoint) vector_in_3d (OpenMaya.mVector) Returns: OpenMaya.MFloatPoint :", "# Create new object to snap self.DUPLICATED = self.getNewObject() # Reset transform of", "-degree return cathetus, degree except ZeroDivisionError: return None, None def getIntersection(self, point_in_3d, vector_in_3d,", "you can buy me a beer in return. # -<NAME> # ---------------------------------------------------------------------------- #", "cmds.deleteUI(DRAGGER) except: pass dragger = cmds.draggerContext( DRAGGER, pressCommand=self.pressEvent, dragCommand=self.dragEvent, releaseCommand=self.releaseEvent, space='screen', projection='viewPlane', undoMode='step',", "import OpenMayaUI from maya import OpenMayaMPx from maya import cmds try: from PySide.QtGui", "to instances if self.InstanceFlag is not True: cmds.makeIdentity(self.DUPLICATED, apply=True, t=True) # Apply transformMatrix", "= OpenMayaMPx.MFnPlugin(mObject) try: mPlugin.deregisterCommand(kPluginCmdName) except: sys.stderr.write(\"Failed to unregister command: %s\\n\" % kPluginCmdName) def", "x = sum([tangentArray[i].x for i in range(numOfVtx)]) / numOfVtx y = sum([tangentArray[i].y for", "OpenMaya.MGlobal.kSurfaceSelectMethod) # Get dagpath, or return None if fails tempSel = OpenMaya.MSelectionList() OpenMaya.MGlobal.getActiveSelectionList(tempSel)", "vector_in_3d def getDagPathFromScreen(x, y): \"\"\" Args: x (int or float) y (int or", "getIntersection(self, point_in_3d, vector_in_3d, fnMesh): \"\"\" Return a point Position of intersection.. 
Args: point_in_3d", "# Select from screen OpenMaya.MGlobal.selectFromScreen( int(x), int(y), OpenMaya.MGlobal.kReplaceList, OpenMaya.MGlobal.kSurfaceSelectMethod) # Get dagpath, or", "transform cmds.xform(self.DUPLICATED, matrix=transformMatrix) cmds.setAttr(self.DUPLICATED + \".shear\", *[0, 0, 0]) cmds.refresh(currentView=True, force=True) def releaseEvent(self):", "None hitBary2 = None maxParamPtr = 99999 # intersectPoint = OpenMaya.MFloatPoint( result =", "OpenMaya.MPoint : point_in_3d OpenMaya.MVector : vector_in_3d \"\"\" point_in_3d = OpenMaya.MPoint() vector_in_3d = OpenMaya.MVector()", "information self.SCALE_ORIG = cmds.getAttr(self.SOURCE + \".scale\")[0] self.MATRIX_ORIG = cmds.xform(self.SOURCE, q=True, matrix=True) self.TARGET_FNMESH =", "- basePoint if lengthVector.length() < length: length = lengthVector.length() closestPoint = point return", "except TypeError: pass # Bitangent vector BV = TV ^ NV BV.normalize() #", "== (self.CTRL | self.SHIFT): OP = getClosestVertex(OP, faceID, targetFnMesh) # Get normal vector", "self.CTRL or qtModifier == self.SHIFT: # If this is the first click of", "q2 = NV.y * math.sin(rad / 2) q3 = NV.z * math.sin(rad /", "self.MOD_FIRST = True qtModifier = QApplication.keyboardModifiers() if qtModifier == self.CTRL or qtModifier ==", "cmds.setAttr(self.DUPLICATED + \".shear\", *[0, 0, 0]) cmds.refresh(currentView=True, force=True) def releaseEvent(self): self.MOD_FIRST = True", "getClosestVertex(OP, faceID, targetFnMesh) # Get normal vector and tangent vector if self.ROTATION is", "is True: self.ROTATION = argData.flagArgumentBool(kRotationFlag, 0) if argData.isFlagSet(kInstanceFlag) is True: self.InstanceFlag = argData.flagArgumentBool(kInstanceFlag,", "values which consist a new transform matrix. Args: mPoint (OpenMaya.MPoint) mVector (OpenMaya.MVector) Returns:", "matrix_orig[2]) TV.normalize() else: NV = self.getNormal(OP, targetFnMesh) TV = self.getTangent(faceID, targetFnMesh) # Ctrl-hold", "self.MOD_POINT[0] start_y = self.MOD_POINT[1] end_x = x end_y = y cathetus = end_x", "self.SHIFT): OP = getClosestVertex(OP, faceID, targetFnMesh) # Get normal vector and tangent vector", "args): # Parse the arguments. argData = OpenMaya.MArgDatabase(syntaxCreator(), args) self.SOURCE = argData.commandArgumentString(0) if", "= OpenMaya.MPoint() length = 99999.0 for index in vertexIndexArray: point = OpenMaya.MPoint() fnMesh.getPoint(index,", "a 3d view \"\"\" if self.TARGET_FNMESH is None: return dragPosition = cmds.draggerContext( DRAGGER,", "faceIDs, triIDs, idSorted, self.SPACE, maxParamPtr, testBothDirections, accelParam, hitPoint, hitRayParam, hitFacePtr, hitTriangle, hitBary1, hitBary2)", "for matrixs \"\"\" # Position of new object OP, faceID = self.getIntersection(mPoint, mVector,", "which consist a new transform matrix. 
Args: mPoint (OpenMaya.MPoint) mVector (OpenMaya.MVector) Returns: list", "geometries, return None if OP is None and faceID is None: return None", "self.SHIFT: degree = 0.0 # Convert point_in_3d, vector_in_3d = convertTo3D( self.MOD_POINT[0], self.MOD_POINT[1]) else:", "matrix def getTangent(self, faceID, targetFnMesh): \"\"\" Return a tangent vector of a face.", "OP is None and faceID is None: return None qtMod = QApplication.keyboardModifiers() if", "scale information self.SCALE_ORIG = cmds.getAttr(self.SOURCE + \".scale\")[0] self.MATRIX_ORIG = cmds.xform(self.SOURCE, q=True, matrix=True) self.TARGET_FNMESH", "BV.x, BV.y, BV.z, 0, OP.x, OP.y, OP.z, 1 ] return matrix def getTangent(self,", "length = 1.0 if qtModifier == self.SHIFT: degree = 0.0 # Convert point_in_3d,", "by middle click if button == 2: cmds.setToolTo('selectSuperContext') return # Get clicked point", "OpenMayaMPx from maya import cmds try: from PySide.QtGui import QApplication from PySide import", "OpenMaya.MSpace.kWorld) lengthVector = point - basePoint if lengthVector.length() < length: length = lengthVector.length()", "= y cathetus = end_x - start_x opposite = end_y - start_y #", "* (scale_plus / 100 + 1.0) y = scale_orig[1] * (scale_plus / 100", "4x4 Transform Matrix try: x = scale_orig[0] * (scale_plus / 100 + 1.0)", "If draggin outside of objects if targetDagPath is None: return # Get origianl", "mPlugin.registerCommand(kPluginCmdName, cmdCreator) mPlugin.setVersion(\"0.10\") except: sys.stderr.write(\"Failed to register command: %s\\n\" % kPluginCmdName) raise def", "qtModifier = QApplication.keyboardModifiers() if qtModifier == self.CTRL or qtModifier == self.SHIFT: # If", "1 ] return matrix def getTangent(self, faceID, targetFnMesh): \"\"\" Return a tangent vector", "Args: pointPosition (OpenMaya.MFloatPoint) targetFnMesh (OpenMaya.MFnMesh) Returns: OpenMaya.MVector : tangent vector int : faceID", "in vertexIndexArray: point = OpenMaya.MPoint() fnMesh.getPoint(index, point, OpenMaya.MSpace.kWorld) lengthVector = point - basePoint", "matrix=True) self.TARGET_FNMESH = OpenMaya.MFnMesh(targetDagPath) transformMatrix = self.getMatrix( point_in_3d, vector_in_3d, self.TARGET_FNMESH, self.SCALE_ORIG, self.MATRIX_ORIG) if", "space pressPosition = cmds.draggerContext(DRAGGER, query=True, ap=True) x = pressPosition[0] y = pressPosition[1] self.ANCHOR_POINT", "y cathetus = end_x - start_x opposite = end_y - start_y # Get", "QApplication from PySide import QtCore except ImportError: from PySide2.QtWidgets import QApplication from PySide2", "middle click if button == 2: cmds.setToolTo('selectSuperContext') return # Get clicked point in", "Syntax creator def syntaxCreator(): syntax = OpenMaya.MSyntax() syntax.addArg(OpenMaya.MSyntax.kString) syntax.addFlag( kDummyFlag, kDummyFlagLong, OpenMaya.MSyntax.kBoolean) syntax.addFlag(", "[x, y] # global MOD_FIRST self.MOD_FIRST = False length, degree = self.getDragInfo(x, y)", "= self.getMatrix( point_in_3d, vector_in_3d, self.TARGET_FNMESH, self.SCALE_ORIG, self.MATRIX_ORIG) if transformMatrix is None: return #", "100 + 1.0) y = scale_orig[1] * (scale_plus / 100 + 1.0) z", "pass dragger = cmds.draggerContext( DRAGGER, pressCommand=self.pressEvent, dragCommand=self.dragEvent, releaseCommand=self.releaseEvent, space='screen', projection='viewPlane', undoMode='step', cursor='hand') return", "a beer in return. 
# -<NAME> # ---------------------------------------------------------------------------- # from maya import OpenMaya", "Apply transformMatrix to the new object cmds.xform(self.DUPLICATED, matrix=transformMatrix) def getNewObject(self): return cmds.duplicate(self.SOURCE, ilf=self.InstanceFlag)[0]", "None def getIntersection(self, point_in_3d, vector_in_3d, fnMesh): \"\"\" Return a point Position of intersection..", "\"\"\" point_in_3d = OpenMaya.MPoint() vector_in_3d = OpenMaya.MVector() OpenMayaUI.M3dView.active3dView().viewToWorld( int(screen_x), int(screen_y), point_in_3d, vector_in_3d) return", "# Get distance using Pythagorean theorem length = math.sqrt( math.pow(cathetus, 2) + math.pow(opposite,", "\"\"\" Return a normal vector of a face. Args: pointPosition (OpenMaya.MFloatPoint) targetFnMesh (OpenMaya.MFnMesh)", "== self.SHIFT: degree = 0.0 # Convert point_in_3d, vector_in_3d = convertTo3D( self.MOD_POINT[0], self.MOD_POINT[1])", "0) if argData.isFlagSet(kInstanceFlag) is True: self.InstanceFlag = argData.flagArgumentBool(kInstanceFlag, 0) cmds.setToolTo(self.setupDragger()) def setupDragger(self): \"\"\"", "= None hitBary1 = None hitBary2 = None maxParamPtr = 99999 # intersectPoint", "for i in range(numOfVtx)]) / numOfVtx tangentVector = OpenMaya.MVector() tangentVector.x = x tangentVector.y", "query=True, button=True) # Leave the tool by middle click if button == 2:", "wrote this file. As long as you retain this # notice you can", "from PySide2 import QtCore import math import sys DRAGGER = \"duplicateOverSurfaceDragger\" UTIL =", "OpenMaya.MVector() OpenMayaUI.M3dView.active3dView().viewToWorld( int(screen_x), int(screen_y), point_in_3d, vector_in_3d) return point_in_3d, vector_in_3d def getDagPathFromScreen(x, y): \"\"\"", "OpenMaya.MFloatVector(vector_in_3d), faceIDs, triIDs, idSorted, self.SPACE, maxParamPtr, testBothDirections, accelParam, hitPoint, hitRayParam, hitFacePtr, hitTriangle, hitBary1,", "= True qtModifier = QApplication.keyboardModifiers() if qtModifier == self.CTRL or qtModifier == self.SHIFT:", "qtModifier == self.SHIFT: # If this is the first click of dragging if", "while dragging a 3d view \"\"\" if self.TARGET_FNMESH is None: return dragPosition =", "meet some day, # and you think this stuff is worth it, you", "Args: point_orig (OpenMaya.MFloatPoint) faceID (int) fnMesh (OpenMaya.MFnMesh) Returns: closestPoint : OpenMaya.MPoint \"\"\" vertexIndexArray", "%s\\n\" % kPluginCmdName) def convertTo3D(screen_x, screen_y): \"\"\" Return point and vector of clicked", "matrix_orig[5], matrix_orig[6]) NV.normalize() TV = OpenMaya.MVector( matrix_orig[0], matrix_orig[1], matrix_orig[2]) TV.normalize() else: NV =", "99999.0 for index in vertexIndexArray: point = OpenMaya.MPoint() fnMesh.getPoint(index, point, OpenMaya.MSpace.kWorld) lengthVector =", "of snap target targetDagPath = getDagPathFromScreen(x, y) # If draggin outside of objects", "+ \".translate\", *[0, 0, 0]) location = [-i for i in cmds.xform(self.DUPLICATED, q=True,", "def dragEvent(self): \"\"\" Event while dragging a 3d view \"\"\" if self.TARGET_FNMESH is", "or qtModifier == self.SHIFT: # If this is the first click of dragging", "Create new object to snap self.DUPLICATED = self.getNewObject() # Reset transform of current", "(int) fnMesh (OpenMaya.MFnMesh) Returns: closestPoint : OpenMaya.MPoint \"\"\" vertexIndexArray = OpenMaya.MIntArray() fnMesh.getPolygonVertices(faceID, vertexIndexArray)", "NV.z * math.sin(rad / 2) q4 = math.cos(rad / 2) TV = TV.rotateBy(q1,", "If it doesn't intersect to any 
geometries, return None if OP is None", "Ctrl-hold rotation if qtMod == self.CTRL: try: rad = math.radians(degree_plus) q1 = NV.x", "faceID \"\"\" ptr_int = UTIL.asIntPtr() origin = OpenMaya.MPoint(pointPosition) normal = OpenMaya.MVector() targetFnMesh.getClosestNormal( origin,", "OpenMaya.MFnMesh(targetDagPath) transformMatrix = self.getMatrix( point_in_3d, vector_in_3d, self.TARGET_FNMESH, self.SCALE_ORIG, self.MATRIX_ORIG) if transformMatrix is None:", "OpenMaya.MVector() targetFnMesh.getClosestNormal( origin, normal, self.SPACE, ptr_int) normal.normalize() return normal # Creator def cmdCreator():", "faceID else: return None, None def getMatrix(self, mPoint, mVector, targetFnMesh, scale_orig, matrix_orig, scale_plus=1,", "def pressEvent(self): button = cmds.draggerContext(DRAGGER, query=True, button=True) # Leave the tool by middle", "if tempSel.length() == 0: return None else: tempSel.getDagPath(0, dagpath) return dagpath def getClosestVertex(point_orig,", "worth it, you can buy me a beer in return. # -<NAME> #", "= True def getDragInfo(self, x, y): \"\"\" Get distance and angle in screen", "= OpenMaya.MPoint(pointPosition) normal = OpenMaya.MVector() targetFnMesh.getClosestNormal( origin, normal, self.SPACE, ptr_int) normal.normalize() return normal", "targetFnMesh (OpenMaya.MFnMesh) Returns: OpenMaya.MVector : tangent vector int : faceID \"\"\" ptr_int =", "self.CTRL: length = 1.0 if qtModifier == self.SHIFT: degree = 0.0 # Convert", ") if transformMatrix is None: return # Apply new transform cmds.xform(self.DUPLICATED, matrix=transformMatrix) cmds.setAttr(self.DUPLICATED", "the first click of dragging if self.MOD_FIRST is True: self.MOD_POINT = [x, y]", "mPoint, mVector, targetFnMesh, scale_orig, matrix_orig, scale_plus=1, degree_plus=0.0): \"\"\" Return a list of values", "this file. As long as you retain this # notice you can do", "= dragPosition[0] y = dragPosition[1] modifier = cmds.draggerContext( DRAGGER, query=True, modifier=True) if modifier", "(int) screen_y (int) Returns: OpenMaya.MPoint : point_in_3d OpenMaya.MVector : vector_in_3d \"\"\" point_in_3d =", "of current object cmds.setAttr(self.DUPLICATED + \".translate\", *[0, 0, 0]) location = [-i for", "is None: return None qtMod = QApplication.keyboardModifiers() if qtMod == (self.CTRL | self.SHIFT):", "self.CTRL = QtCore.Qt.ControlModifier def doIt(self, args): # Parse the arguments. 
argData = OpenMaya.MArgDatabase(syntaxCreator(),", "point - basePoint if lengthVector.length() < length: length = lengthVector.length() closestPoint = point", "vector_in_3d = OpenMaya.MVector() OpenMayaUI.M3dView.active3dView().viewToWorld( int(screen_x), int(screen_y), point_in_3d, vector_in_3d) return point_in_3d, vector_in_3d def getDagPathFromScreen(x,", "self.MATRIX_ORIG = cmds.xform(self.SOURCE, q=True, matrix=True) self.TARGET_FNMESH = OpenMaya.MFnMesh(targetDagPath) transformMatrix = self.getMatrix( point_in_3d, vector_in_3d,", "releaseEvent(self): self.MOD_FIRST = True def getDragInfo(self, x, y): \"\"\" Get distance and angle", "(self.CTRL | self.SHIFT): OP = getClosestVertex(OP, faceID, targetFnMesh) # Get normal vector and", "OpenMaya.MArgDatabase(syntaxCreator(), args) self.SOURCE = argData.commandArgumentString(0) if argData.isFlagSet(kRotationFlag) is True: self.ROTATION = argData.flagArgumentBool(kRotationFlag, 0)", "def getNewObject(self): return cmds.duplicate(self.SOURCE, ilf=self.InstanceFlag)[0] def dragEvent(self): \"\"\" Event while dragging a 3d", "hitBary1, hitBary2) faceID = UTIL.getInt(hitFacePtr) if result is True: return hitPoint, faceID else:", "# \"THE BEER-WARE LICENSE\" (Revision 42): # <<EMAIL>> wrote this file. As long", "from PySide2.QtWidgets import QApplication from PySide2 import QtCore import math import sys DRAGGER", "import QtCore import math import sys DRAGGER = \"duplicateOverSurfaceDragger\" UTIL = OpenMaya.MScriptUtil() kPluginCmdName", "== self.CTRL: try: rad = math.radians(degree_plus) q1 = NV.x * math.sin(rad / 2)", "if transformMatrix is None: return # Apply new transform cmds.xform(self.DUPLICATED, matrix=transformMatrix) cmds.setAttr(self.DUPLICATED +", "OpenMaya.MSyntax.kBoolean) syntax.addFlag( kRotationFlag, kRotationFlagLong, OpenMaya.MSyntax.kBoolean) syntax.addFlag( kInstanceFlag, kInstanceFlagLong, OpenMaya.MSyntax.kBoolean) return syntax class DuplicateOverSurface(OpenMayaMPx.MPxCommand):", "sum([tangentArray[i].y for i in range(numOfVtx)]) / numOfVtx z = sum([tangentArray[i].z for i in", "hitFacePtr = UTIL.asIntPtr() idSorted = False testBothDirections = False faceIDs = None triIDs", "Position of intersection.. Args: point_in_3d (OpenMaya.MPoint) vector_in_3d (OpenMaya.mVector) Returns: OpenMaya.MFloatPoint : hitPoint \"\"\"", "point_in_3d, vector_in_3d, fnMesh): \"\"\" Return a point Position of intersection.. Args: point_in_3d (OpenMaya.MPoint)", "faceID (int) fnMesh (OpenMaya.MFnMesh) Returns: closestPoint : OpenMaya.MPoint \"\"\" vertexIndexArray = OpenMaya.MIntArray() fnMesh.getPolygonVertices(faceID,", "3d view \"\"\" if self.TARGET_FNMESH is None: return dragPosition = cmds.draggerContext( DRAGGER, query=True,", "cmdCreator) mPlugin.setVersion(\"0.10\") except: sys.stderr.write(\"Failed to register command: %s\\n\" % kPluginCmdName) raise def uninitializePlugin(mObject):", "argData.isFlagSet(kInstanceFlag) is True: self.InstanceFlag = argData.flagArgumentBool(kInstanceFlag, 0) cmds.setToolTo(self.setupDragger()) def setupDragger(self): \"\"\" Setup dragger", "= UTIL.getInt(hitFacePtr) if result is True: return hitPoint, faceID else: return None, None", "# Get dagpath, or return None if fails tempSel = OpenMaya.MSelectionList() OpenMaya.MGlobal.getActiveSelectionList(tempSel) dagpath", "PySide.QtGui import QApplication from PySide import QtCore except ImportError: from PySide2.QtWidgets import QApplication", "a normal vector of a face. 
Args: pointPosition (OpenMaya.MFloatPoint) targetFnMesh (OpenMaya.MFnMesh) Returns: OpenMaya.MVector", "/ 2) q2 = NV.y * math.sin(rad / 2) q3 = NV.z *", ": OpenMaya.MPoint \"\"\" vertexIndexArray = OpenMaya.MIntArray() fnMesh.getPolygonVertices(faceID, vertexIndexArray) basePoint = OpenMaya.MPoint(point_orig) closestPoint =", "releaseCommand=self.releaseEvent, space='screen', projection='viewPlane', undoMode='step', cursor='hand') return dragger def pressEvent(self): button = cmds.draggerContext(DRAGGER, query=True,", "hitTriangle = None hitBary1 = None hitBary2 = None maxParamPtr = 99999 #", "numOfVtx tangentVector = OpenMaya.MVector() tangentVector.x = x tangentVector.y = y tangentVector.z = z", "new object transformMatrix = self.getMatrix( point_in_3d, vector_in_3d, self.TARGET_FNMESH, self.SCALE_ORIG, self.MATRIX_ORIG, length, degree )", "= TV ^ NV BV.normalize() # 4x4 Transform Matrix try: x = scale_orig[0]", "args) self.SOURCE = argData.commandArgumentString(0) if argData.isFlagSet(kRotationFlag) is True: self.ROTATION = argData.flagArgumentBool(kRotationFlag, 0) if", "maya import OpenMayaMPx from maya import cmds try: from PySide.QtGui import QApplication from", "fnMesh (OpenMaya.MFnMesh) Returns: closestPoint : OpenMaya.MPoint \"\"\" vertexIndexArray = OpenMaya.MIntArray() fnMesh.getPolygonVertices(faceID, vertexIndexArray) basePoint", "OpenMaya.MPoint() vector_in_3d = OpenMaya.MVector() OpenMayaUI.M3dView.active3dView().viewToWorld( int(screen_x), int(screen_y), point_in_3d, vector_in_3d) return point_in_3d, vector_in_3d def", "self.SOURCE = argData.commandArgumentString(0) if argData.isFlagSet(kRotationFlag) is True: self.ROTATION = argData.flagArgumentBool(kRotationFlag, 0) if argData.isFlagSet(kInstanceFlag)", "values for matrixs \"\"\" # Position of new object OP, faceID = self.getIntersection(mPoint,", "this # notice you can do whatever you want with this stuff. If", "index in vertexIndexArray: point = OpenMaya.MPoint() fnMesh.getPoint(index, point, OpenMaya.MSpace.kWorld) lengthVector = point -", "OpenMayaUI from maya import OpenMayaMPx from maya import cmds try: from PySide.QtGui import", "as you retain this # notice you can do whatever you want with", "True self.InstanceFlag = False self.SHIFT = QtCore.Qt.ShiftModifier self.CTRL = QtCore.Qt.ControlModifier def doIt(self, args):", "face. Args: pointPosition (OpenMaya.MFloatPoint) targetFnMesh (OpenMaya.MFnMesh) Returns: OpenMaya.MVector : tangent vector int :", "return OpenMayaMPx.asMPxPtr(DuplicateOverSurface()) def initializePlugin(mObject): mPlugin = OpenMayaMPx.MFnPlugin(mObject, \"<NAME>\") try: mPlugin.registerCommand(kPluginCmdName, cmdCreator) mPlugin.setVersion(\"0.10\") except:", "None if fails tempSel = OpenMaya.MSelectionList() OpenMaya.MGlobal.getActiveSelectionList(tempSel) dagpath = OpenMaya.MDagPath() if tempSel.length() ==", "matrix for new object transformMatrix = self.getMatrix( point_in_3d, vector_in_3d, self.TARGET_FNMESH, self.SCALE_ORIG, self.MATRIX_ORIG, length,", "maxParamPtr, testBothDirections, accelParam, hitPoint, hitRayParam, hitFacePtr, hitTriangle, hitBary1, hitBary2) faceID = UTIL.getInt(hitFacePtr) if", "= \"-d\" kDummyFlagLong = \"-dummy\" kInstanceFlag = \"-ilf\" kInstanceFlagLong = \"-instanceLeaf\" # Syntax", "QtCore.Qt.ControlModifier def doIt(self, args): # Parse the arguments. argData = OpenMaya.MArgDatabase(syntaxCreator(), args) self.SOURCE", "matrix. 
Args: mPoint (OpenMaya.MPoint) mVector (OpenMaya.MVector) Returns: list : 16 values for matrixs", "outside of objects if targetDagPath is None: return # Get origianl scale information", "notice you can do whatever you want with this stuff. If we meet", "Get normal vector and tangent vector if self.ROTATION is False: NV = OpenMaya.MVector(", "*location) # Can't apply freeze to instances if self.InstanceFlag is not True: cmds.makeIdentity(self.DUPLICATED,", "0: return None else: tempSel.getDagPath(0, dagpath) return dagpath def getClosestVertex(point_orig, faceID, fnMesh): \"\"\"", "except ZeroDivisionError: return None, None def getIntersection(self, point_in_3d, vector_in_3d, fnMesh): \"\"\" Return a", "tangentVector.normalize() return tangentVector def getNormal(self, pointPosition, targetFnMesh): \"\"\" Return a normal vector of", "whatever you want with this stuff. If we meet some day, # and", "if qtMod == (self.CTRL | self.SHIFT): OP = getClosestVertex(OP, faceID, targetFnMesh) # Get", "cmds.xform(self.DUPLICATED, matrix=transformMatrix) cmds.setAttr(self.DUPLICATED + \".shear\", *[0, 0, 0]) cmds.refresh(currentView=True, force=True) def releaseEvent(self): self.MOD_FIRST", "doesn't intersect to any geometries, return None if OP is None and faceID", "syntax.addFlag( kRotationFlag, kRotationFlagLong, OpenMaya.MSyntax.kBoolean) syntax.addFlag( kInstanceFlag, kInstanceFlagLong, OpenMaya.MSyntax.kBoolean) return syntax class DuplicateOverSurface(OpenMayaMPx.MPxCommand): def", "# Get normal vector and tangent vector if self.ROTATION is False: NV =", "tempSel = OpenMaya.MSelectionList() OpenMaya.MGlobal.getActiveSelectionList(tempSel) dagpath = OpenMaya.MDagPath() if tempSel.length() == 0: return None", "matrix=transformMatrix) cmds.setAttr(self.DUPLICATED + \".shear\", *[0, 0, 0]) cmds.refresh(currentView=True, force=True) def releaseEvent(self): self.MOD_FIRST =", "pressPosition[0] y = pressPosition[1] self.ANCHOR_POINT = [x, y] # Convert point_in_3d, vector_in_3d =", "OpenMaya.MGlobal.kReplaceList, OpenMaya.MGlobal.kSurfaceSelectMethod) # Get dagpath, or return None if fails tempSel = OpenMaya.MSelectionList()", "button == 2: cmds.setToolTo('selectSuperContext') return # Get clicked point in viewport screen space", "NV.y * math.sin(rad / 2) q3 = NV.z * math.sin(rad / 2) q4", "dragger = cmds.draggerContext( DRAGGER, pressCommand=self.pressEvent, dragCommand=self.dragEvent, releaseCommand=self.releaseEvent, space='screen', projection='viewPlane', undoMode='step', cursor='hand') return dragger", "# # # ---------------------------------------------------------------------------- # \"THE BEER-WARE LICENSE\" (Revision 42): # <<EMAIL>> wrote", "self.MOD_FIRST is True: self.MOD_POINT = [x, y] # global MOD_FIRST self.MOD_FIRST = False", "tangent vector if self.ROTATION is False: NV = OpenMaya.MVector( matrix_orig[4], matrix_orig[5], matrix_orig[6]) NV.normalize()", "\".shear\", *[0, 0, 0]) cmds.refresh(currentView=True, force=True) def releaseEvent(self): self.MOD_FIRST = True def getDragInfo(self,", "degree = 0.0 # Convert point_in_3d, vector_in_3d = convertTo3D( self.MOD_POINT[0], self.MOD_POINT[1]) else: point_in_3d,", "getDragInfo(self, x, y): \"\"\" Get distance and angle in screen space. \"\"\" start_x", "2) q3 = NV.z * math.sin(rad / 2) q4 = math.cos(rad / 2)", "*= z except TypeError: pass finally: matrix = [ TV.x, TV.y, TV.z, 0,", "of values which consist a new transform matrix. 
Args: mPoint (OpenMaya.MPoint) mVector (OpenMaya.MVector)", "space='screen', projection='viewPlane', undoMode='step', cursor='hand') return dragger def pressEvent(self): button = cmds.draggerContext(DRAGGER, query=True, button=True)", "# Get clicked point in viewport screen space pressPosition = cmds.draggerContext(DRAGGER, query=True, ap=True)", "OP.y, OP.z, 1 ] return matrix def getTangent(self, faceID, targetFnMesh): \"\"\" Return a", "Get distance and angle in screen space. \"\"\" start_x = self.MOD_POINT[0] start_y =", "UTIL.getInt(hitFacePtr) if result is True: return hitPoint, faceID else: return None, None def", "import OpenMayaMPx from maya import cmds try: from PySide.QtGui import QApplication from PySide", "self).__init__() self.ANCHOR_POINT = None self.DUPLICATED = None self.SOURCE = None self.SCALE_ORIG = None", "cmds.xform(self.SOURCE, q=True, matrix=True) self.TARGET_FNMESH = OpenMaya.MFnMesh(targetDagPath) transformMatrix = self.getMatrix( point_in_3d, vector_in_3d, self.TARGET_FNMESH, self.SCALE_ORIG,", "current object cmds.setAttr(self.DUPLICATED + \".translate\", *[0, 0, 0]) location = [-i for i", "point_in_3d, vector_in_3d, self.TARGET_FNMESH, self.SCALE_ORIG, self.MATRIX_ORIG) if transformMatrix is None: return # Create new", "transformMatrix = self.getMatrix( point_in_3d, vector_in_3d, self.TARGET_FNMESH, self.SCALE_ORIG, self.MATRIX_ORIG, length, degree ) if transformMatrix", "mPoint (OpenMaya.MPoint) mVector (OpenMaya.MVector) Returns: list : 16 values for matrixs \"\"\" #", "dagpath = OpenMaya.MDagPath() if tempSel.length() == 0: return None else: tempSel.getDagPath(0, dagpath) return", "matrix_orig[4], matrix_orig[5], matrix_orig[6]) NV.normalize() TV = OpenMaya.MVector( matrix_orig[0], matrix_orig[1], matrix_orig[2]) TV.normalize() else: NV", "if opposite < 0: degree = -degree return cathetus, degree except ZeroDivisionError: return", "= scale_orig[0] * (scale_plus / 100 + 1.0) y = scale_orig[1] * (scale_plus", "OpenMaya.MPoint(pointPosition) normal = OpenMaya.MVector() targetFnMesh.getClosestNormal( origin, normal, self.SPACE, ptr_int) normal.normalize() return normal #", "\"\"\" if self.TARGET_FNMESH is None: return dragPosition = cmds.draggerContext( DRAGGER, query=True, dragPoint=True) x", "targetDagPath = getDagPathFromScreen(x, y) # If draggin outside of objects if targetDagPath is", "i in cmds.xform(self.DUPLICATED, q=True, ws=True, rp=True)] cmds.setAttr(self.DUPLICATED + \".translate\", *location) # Can't apply", "vertexIndexArray = OpenMaya.MIntArray() fnMesh.getPolygonVertices(faceID, vertexIndexArray) basePoint = OpenMaya.MPoint(point_orig) closestPoint = OpenMaya.MPoint() length =", "cmds.draggerContext( DRAGGER, pressCommand=self.pressEvent, dragCommand=self.dragEvent, releaseCommand=self.releaseEvent, space='screen', projection='viewPlane', undoMode='step', cursor='hand') return dragger def pressEvent(self):", "return. # -<NAME> # ---------------------------------------------------------------------------- # from maya import OpenMaya from maya import", "vector_in_3d (OpenMaya.mVector) Returns: OpenMaya.MFloatPoint : hitPoint \"\"\" hitPoint = OpenMaya.MFloatPoint() hitFacePtr = UTIL.asIntPtr()", "\"\"\" Return a tangent vector of a face. 
Args: faceID (int) mVector (OpenMaya.MVector)", "end_y - start_y # Get distance using Pythagorean theorem length = math.sqrt( math.pow(cathetus,", "point_in_3d, vector_in_3d = convertTo3D(x, y) length = 1.0 degree = 0.0 # Get", "self.InstanceFlag is not True: cmds.makeIdentity(self.DUPLICATED, apply=True, t=True) # Apply transformMatrix to the new", "= False self.SHIFT = QtCore.Qt.ShiftModifier self.CTRL = QtCore.Qt.ControlModifier def doIt(self, args): # Parse", "42): # <<EMAIL>> wrote this file. As long as you retain this #", "modifier == \"none\": self.MOD_FIRST = True qtModifier = QApplication.keyboardModifiers() if qtModifier == self.CTRL", "transform of current object cmds.setAttr(self.DUPLICATED + \".translate\", *[0, 0, 0]) location = [-i", "x tangentVector.y = y tangentVector.z = z tangentVector.normalize() return tangentVector def getNormal(self, pointPosition,", "buy me a beer in return. # -<NAME> # ---------------------------------------------------------------------------- # from maya", "targetFnMesh) # Ctrl-hold rotation if qtMod == self.CTRL: try: rad = math.radians(degree_plus) q1", "= fnMesh.closestIntersection( OpenMaya.MFloatPoint( point_in_3d.x, point_in_3d.y, point_in_3d.z), OpenMaya.MFloatVector(vector_in_3d), faceIDs, triIDs, idSorted, self.SPACE, maxParamPtr, testBothDirections,", "scale_plus=1, degree_plus=0.0): \"\"\" Return a list of values which consist a new transform", "point, OpenMaya.MSpace.kWorld) lengthVector = point - basePoint if lengthVector.length() < length: length =", "= \"duplicateOverSurfaceDragger\" UTIL = OpenMaya.MScriptUtil() kPluginCmdName = \"duplicateOverSurface\" kRotationFlag = \"-r\" kRotationFlagLong =", "\"\"\" start_x = self.MOD_POINT[0] start_y = self.MOD_POINT[1] end_x = x end_y = y", "and you think this stuff is worth it, you can buy me a", "in viewport screen space pressPosition = cmds.draggerContext(DRAGGER, query=True, ap=True) x = pressPosition[0] y", "z = scale_orig[2] * (scale_plus / 100 + 1.0) TV *= x NV", "fnMesh.getPolygonVertices(faceID, vertexIndexArray) basePoint = OpenMaya.MPoint(point_orig) closestPoint = OpenMaya.MPoint() length = 99999.0 for index", "sum([tangentArray[i].x for i in range(numOfVtx)]) / numOfVtx y = sum([tangentArray[i].y for i in", "# 4x4 Transform Matrix try: x = scale_orig[0] * (scale_plus / 100 +", "cmds.draggerContext(DRAGGER, query=True, ap=True) x = pressPosition[0] y = pressPosition[1] self.ANCHOR_POINT = [x, y]", "fnMesh): \"\"\" Return a point Position of intersection.. Args: point_in_3d (OpenMaya.MPoint) vector_in_3d (OpenMaya.mVector)", "faceID, targetFnMesh): \"\"\" Return a tangent vector of a face. 
Args: faceID (int)", "OpenMaya.MFloatVectorArray() targetFnMesh.getFaceVertexTangents( faceID, tangentArray, self.SPACE) numOfVtx = tangentArray.length() x = sum([tangentArray[i].x for i", "= getDagPathFromScreen(x, y) # If draggin outside of objects if targetDagPath is None:", "# Creator def cmdCreator(): return OpenMayaMPx.asMPxPtr(DuplicateOverSurface()) def initializePlugin(mObject): mPlugin = OpenMayaMPx.MFnPlugin(mObject, \"<NAME>\") try:", "# # duplicaeOnSurface.py # # # ---------------------------------------------------------------------------- # \"THE BEER-WARE LICENSE\" (Revision 42):", "dragger def pressEvent(self): button = cmds.draggerContext(DRAGGER, query=True, button=True) # Leave the tool by", "= True self.InstanceFlag = False self.SHIFT = QtCore.Qt.ShiftModifier self.CTRL = QtCore.Qt.ControlModifier def doIt(self,", "NV.y, NV.z, 0, BV.x, BV.y, BV.z, 0, OP.x, OP.y, OP.z, 1 ] return", "== 0: return None else: tempSel.getDagPath(0, dagpath) return dagpath def getClosestVertex(point_orig, faceID, fnMesh):", "dagpath) return dagpath def getClosestVertex(point_orig, faceID, fnMesh): \"\"\" Args: point_orig (OpenMaya.MFloatPoint) faceID (int)", "uninitializePlugin(mObject): mPlugin = OpenMayaMPx.MFnPlugin(mObject) try: mPlugin.deregisterCommand(kPluginCmdName) except: sys.stderr.write(\"Failed to unregister command: %s\\n\" %", "file. As long as you retain this # notice you can do whatever", "= OpenMaya.MSpace.kWorld self.ROTATION = True self.InstanceFlag = False self.SHIFT = QtCore.Qt.ShiftModifier self.CTRL =", "if self.ROTATION is False: NV = OpenMaya.MVector( matrix_orig[4], matrix_orig[5], matrix_orig[6]) NV.normalize() TV =", "tangent vector \"\"\" tangentArray = OpenMaya.MFloatVectorArray() targetFnMesh.getFaceVertexTangents( faceID, tangentArray, self.SPACE) numOfVtx = tangentArray.length()", "kInstanceFlagLong = \"-instanceLeaf\" # Syntax creator def syntaxCreator(): syntax = OpenMaya.MSyntax() syntax.addArg(OpenMaya.MSyntax.kString) syntax.addFlag(", "self.SCALE_ORIG, self.MATRIX_ORIG, length, degree ) if transformMatrix is None: return # Apply new", "hitPoint \"\"\" hitPoint = OpenMaya.MFloatPoint() hitFacePtr = UTIL.asIntPtr() idSorted = False testBothDirections =", "not True: cmds.makeIdentity(self.DUPLICATED, apply=True, t=True) # Apply transformMatrix to the new object cmds.xform(self.DUPLICATED,", "<reponame>minoue/miMayaPlugins<gh_stars>10-100 # # duplicaeOnSurface.py # # # ---------------------------------------------------------------------------- # \"THE BEER-WARE LICENSE\" (Revision", "matrix_orig[0], matrix_orig[1], matrix_orig[2]) TV.normalize() else: NV = self.getNormal(OP, targetFnMesh) TV = self.getTangent(faceID, targetFnMesh)", "BV *= z except TypeError: pass finally: matrix = [ TV.x, TV.y, TV.z,", "/ 100 + 1.0) z = scale_orig[2] * (scale_plus / 100 + 1.0)", "triIDs = None accelParam = None hitRayParam = None hitTriangle = None hitBary1", "self.SPACE = OpenMaya.MSpace.kWorld self.ROTATION = True self.InstanceFlag = False self.SHIFT = QtCore.Qt.ShiftModifier self.CTRL", "end_x - start_x opposite = end_y - start_y # Get distance using Pythagorean", "kRotationFlag = \"-r\" kRotationFlagLong = \"-rotation\" kDummyFlag = \"-d\" kDummyFlagLong = \"-dummy\" kInstanceFlag", "return None, None def getMatrix(self, mPoint, mVector, targetFnMesh, scale_orig, matrix_orig, scale_plus=1, degree_plus=0.0): \"\"\"", "screen space. 
\"\"\" start_x = self.MOD_POINT[0] start_y = self.MOD_POINT[1] end_x = x end_y", "\"-instanceLeaf\" # Syntax creator def syntaxCreator(): syntax = OpenMaya.MSyntax() syntax.addArg(OpenMaya.MSyntax.kString) syntax.addFlag( kDummyFlag, kDummyFlagLong,", "screen_y (int) Returns: OpenMaya.MPoint : point_in_3d OpenMaya.MVector : vector_in_3d \"\"\" point_in_3d = OpenMaya.MPoint()", "distance using Pythagorean theorem length = math.sqrt( math.pow(cathetus, 2) + math.pow(opposite, 2)) try:", "vector \"\"\" tangentArray = OpenMaya.MFloatVectorArray() targetFnMesh.getFaceVertexTangents( faceID, tangentArray, self.SPACE) numOfVtx = tangentArray.length() x", "<<EMAIL>> wrote this file. As long as you retain this # notice you", "apply=True, t=True) # Apply transformMatrix to the new object cmds.xform(self.DUPLICATED, matrix=transformMatrix) def getNewObject(self):", "self.MATRIX_ORIG = None self.TARGET_FNMESH = None self.MOD_FIRST = None self.MOD_POINT = None self.SPACE", "TV.rotateBy(q1, q2, q3, q4) except TypeError: pass # Bitangent vector BV = TV", "NV BV.normalize() # 4x4 Transform Matrix try: x = scale_orig[0] * (scale_plus /", "(scale_plus / 100 + 1.0) TV *= x NV *= y BV *=", "if qtModifier == self.SHIFT: degree = 0.0 # Convert point_in_3d, vector_in_3d = convertTo3D(", "day, # and you think this stuff is worth it, you can buy", "math.pow(cathetus, 2) + math.pow(opposite, 2)) try: theta = cathetus / length degree =", "targetFnMesh, scale_orig, matrix_orig, scale_plus=1, degree_plus=0.0): \"\"\" Return a list of values which consist", "+ math.pow(opposite, 2)) try: theta = cathetus / length degree = math.degrees(math.acos(theta)) if", "y) length = 1.0 degree = 0.0 # Get new transform matrix for", "= scale_orig[2] * (scale_plus / 100 + 1.0) TV *= x NV *=", "vector_in_3d, self.TARGET_FNMESH, self.SCALE_ORIG, self.MATRIX_ORIG, length, degree ) if transformMatrix is None: return #", "tangentArray = OpenMaya.MFloatVectorArray() targetFnMesh.getFaceVertexTangents( faceID, tangentArray, self.SPACE) numOfVtx = tangentArray.length() x = sum([tangentArray[i].x", "hitFacePtr, hitTriangle, hitBary1, hitBary2) faceID = UTIL.getInt(hitFacePtr) if result is True: return hitPoint,", "mPlugin = OpenMayaMPx.MFnPlugin(mObject, \"<NAME>\") try: mPlugin.registerCommand(kPluginCmdName, cmdCreator) mPlugin.setVersion(\"0.10\") except: sys.stderr.write(\"Failed to register command:", "do whatever you want with this stuff. If we meet some day, #", "% kPluginCmdName) raise def uninitializePlugin(mObject): mPlugin = OpenMayaMPx.MFnPlugin(mObject) try: mPlugin.deregisterCommand(kPluginCmdName) except: sys.stderr.write(\"Failed to", "Return a normal vector of a face. 
Args: pointPosition (OpenMaya.MFloatPoint) targetFnMesh (OpenMaya.MFnMesh) Returns:", "% kPluginCmdName) def convertTo3D(screen_x, screen_y): \"\"\" Return point and vector of clicked point", "self.SHIFT: # If this is the first click of dragging if self.MOD_FIRST is", "screen_x (int) screen_y (int) Returns: OpenMaya.MPoint : point_in_3d OpenMaya.MVector : vector_in_3d \"\"\" point_in_3d", "vector BV = TV ^ NV BV.normalize() # 4x4 Transform Matrix try: x", "UTIL = OpenMaya.MScriptUtil() kPluginCmdName = \"duplicateOverSurface\" kRotationFlag = \"-r\" kRotationFlagLong = \"-rotation\" kDummyFlag", "Creator def cmdCreator(): return OpenMayaMPx.asMPxPtr(DuplicateOverSurface()) def initializePlugin(mObject): mPlugin = OpenMayaMPx.MFnPlugin(mObject, \"<NAME>\") try: mPlugin.registerCommand(kPluginCmdName,", "\"\"\" # Select from screen OpenMaya.MGlobal.selectFromScreen( int(x), int(y), OpenMaya.MGlobal.kReplaceList, OpenMaya.MGlobal.kSurfaceSelectMethod) # Get dagpath,", "= 99999.0 for index in vertexIndexArray: point = OpenMaya.MPoint() fnMesh.getPoint(index, point, OpenMaya.MSpace.kWorld) lengthVector", "vector if self.ROTATION is False: NV = OpenMaya.MVector( matrix_orig[4], matrix_orig[5], matrix_orig[6]) NV.normalize() TV", "try: mPlugin.registerCommand(kPluginCmdName, cmdCreator) mPlugin.setVersion(\"0.10\") except: sys.stderr.write(\"Failed to register command: %s\\n\" % kPluginCmdName) raise", "= OpenMaya.MArgDatabase(syntaxCreator(), args) self.SOURCE = argData.commandArgumentString(0) if argData.isFlagSet(kRotationFlag) is True: self.ROTATION = argData.flagArgumentBool(kRotationFlag,", "cursor='hand') return dragger def pressEvent(self): button = cmds.draggerContext(DRAGGER, query=True, button=True) # Leave the", "# Syntax creator def syntaxCreator(): syntax = OpenMaya.MSyntax() syntax.addArg(OpenMaya.MSyntax.kString) syntax.addFlag( kDummyFlag, kDummyFlagLong, OpenMaya.MSyntax.kBoolean)", "kPluginCmdName) def convertTo3D(screen_x, screen_y): \"\"\" Return point and vector of clicked point in", "OpenMaya.MSyntax.kBoolean) syntax.addFlag( kInstanceFlag, kInstanceFlagLong, OpenMaya.MSyntax.kBoolean) return syntax class DuplicateOverSurface(OpenMayaMPx.MPxCommand): def __init__(self): super(DuplicateOverSurface, self).__init__()", "stuff is worth it, you can buy me a beer in return. #", "OpenMaya.MSelectionList() OpenMaya.MGlobal.getActiveSelectionList(tempSel) dagpath = OpenMaya.MDagPath() if tempSel.length() == 0: return None else: tempSel.getDagPath(0,", "except: pass dragger = cmds.draggerContext( DRAGGER, pressCommand=self.pressEvent, dragCommand=self.dragEvent, releaseCommand=self.releaseEvent, space='screen', projection='viewPlane', undoMode='step', cursor='hand')", "Get dagpath, or return None if fails tempSel = OpenMaya.MSelectionList() OpenMaya.MGlobal.getActiveSelectionList(tempSel) dagpath =", "PySide2.QtWidgets import QApplication from PySide2 import QtCore import math import sys DRAGGER =", "/ numOfVtx z = sum([tangentArray[i].z for i in range(numOfVtx)]) / numOfVtx tangentVector =", "return syntax class DuplicateOverSurface(OpenMayaMPx.MPxCommand): def __init__(self): super(DuplicateOverSurface, self).__init__() self.ANCHOR_POINT = None self.DUPLICATED =", "can do whatever you want with this stuff. 
    def doIt(self, args):
        # Parse the arguments.
        argData = OpenMaya.MArgDatabase(syntaxCreator(), args)
        self.SOURCE = argData.commandArgumentString(0)
        if argData.isFlagSet(kRotationFlag) is True:
            self.ROTATION = argData.flagArgumentBool(kRotationFlag, 0)
        if argData.isFlagSet(kInstanceFlag) is True:
            self.InstanceFlag = argData.flagArgumentBool(kInstanceFlag, 0)

        cmds.setToolTo(self.setupDragger())

    def setupDragger(self):
        """ Setup dragger context command """

        try:
            cmds.deleteUI(DRAGGER)
        except:
            pass

        dragger = cmds.draggerContext(
            DRAGGER,
            pressCommand=self.pressEvent,
            dragCommand=self.dragEvent,
            releaseCommand=self.releaseEvent,
            space='screen',
            projection='viewPlane',
            undoMode='step',
            cursor='hand')

        return dragger
    def pressEvent(self):
        button = cmds.draggerContext(DRAGGER, query=True, button=True)

        # Leave the tool by middle click
        if button == 2:
            cmds.setToolTo('selectSuperContext')
            return

        # Get clicked point in viewport screen space
        pressPosition = cmds.draggerContext(DRAGGER, query=True, ap=True)
        x = pressPosition[0]
        y = pressPosition[1]

        self.ANCHOR_POINT = [x, y]

        # Convert the screen point to a world-space point and view ray
        point_in_3d, vector_in_3d = convertTo3D(x, y)

        # Get MFnMesh of snap target
        targetDagPath = getDagPathFromScreen(x, y)

        # If dragging outside of objects
        if targetDagPath is None:
            return

        # Get original scale information
        self.SCALE_ORIG = cmds.getAttr(self.SOURCE + ".scale")[0]
        self.MATRIX_ORIG = cmds.xform(self.SOURCE, q=True, matrix=True)
        self.TARGET_FNMESH = OpenMaya.MFnMesh(targetDagPath)

        transformMatrix = self.getMatrix(
            point_in_3d,
            vector_in_3d,
            self.TARGET_FNMESH,
            self.SCALE_ORIG,
            self.MATRIX_ORIG)

        if transformMatrix is None:
            return

        # Create new object to snap
        self.DUPLICATED = self.getNewObject()

        # Reset transform of current object
        cmds.setAttr(self.DUPLICATED + ".translate", *[0, 0, 0])

        location = [-i for i in cmds.xform(
            self.DUPLICATED, q=True, ws=True, rp=True)]
        cmds.setAttr(self.DUPLICATED + ".translate", *location)

        # Can't apply freeze to instances
        if self.InstanceFlag is not True:
            cmds.makeIdentity(self.DUPLICATED, apply=True, t=True)

        # Apply transformMatrix to the new object
        cmds.xform(self.DUPLICATED, matrix=transformMatrix)

    def getNewObject(self):
        return cmds.duplicate(self.SOURCE, ilf=self.InstanceFlag)[0]
    def dragEvent(self):
        """ Event while dragging a 3d view """

        if self.TARGET_FNMESH is None:
            return

        dragPosition = cmds.draggerContext(
            DRAGGER,
            query=True,
            dragPoint=True)

        x = dragPosition[0]
        y = dragPosition[1]

        modifier = cmds.draggerContext(
            DRAGGER,
            query=True,
            modifier=True)

        if modifier == "none":
            self.MOD_FIRST = True

        qtModifier = QApplication.keyboardModifiers()

        if qtModifier == self.CTRL or qtModifier == self.SHIFT:

            # If this is the first click of dragging
            if self.MOD_FIRST is True:
                self.MOD_POINT = [x, y]

                # global MOD_FIRST
                self.MOD_FIRST = False

            length, degree = self.getDragInfo(x, y)

            if qtModifier == self.CTRL:
                length = 1.0
            if qtModifier == self.SHIFT:
                degree = 0.0

            # Convert the modifier anchor point instead of the cursor
            point_in_3d, vector_in_3d = convertTo3D(
                self.MOD_POINT[0],
                self.MOD_POINT[1])
        else:
            point_in_3d, vector_in_3d = convertTo3D(x, y)
            length = 1.0
            degree = 0.0

        # Get new transform matrix for new object
        transformMatrix = self.getMatrix(
            point_in_3d,
            vector_in_3d,
            self.TARGET_FNMESH,
            self.SCALE_ORIG,
            self.MATRIX_ORIG,
            length,
            degree)

        if transformMatrix is None:
            return

        # Apply new transform
        cmds.xform(self.DUPLICATED, matrix=transformMatrix)
        cmds.setAttr(self.DUPLICATED + ".shear", *[0, 0, 0])

        cmds.refresh(currentView=True, force=True)

    def releaseEvent(self):
        self.MOD_FIRST = True
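    # NOTE: a summary of the modifier keys handled by dragEvent() above and
    # getMatrix() below, as read from this code rather than official docs:
    #   plain drag : the duplicate follows the cursor over the surface
    #   Shift-drag : degree is forced to 0.0, so the horizontal drag
    #                distance only offsets the scale
    #   Ctrl-drag  : length is forced to 1.0, so the drag only rotates
    #                the duplicate around the surface normal
    #   Ctrl+Shift : the hit point snaps to the closest vertex of the
    #                hit face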
    def getDragInfo(self, x, y):
        """ Get distance and angle in screen space. """

        start_x = self.MOD_POINT[0]
        start_y = self.MOD_POINT[1]
        end_x = x
        end_y = y

        cathetus = end_x - start_x
        opposite = end_y - start_y

        # Get distance using Pythagorean theorem
        length = math.sqrt(
            math.pow(cathetus, 2) + math.pow(opposite, 2))

        try:
            theta = cathetus / length
            degree = math.degrees(math.acos(theta))
            if opposite < 0:
                degree = -degree
            # Returns the signed horizontal distance (cathetus), which the
            # caller uses as the scale offset, together with the angle.
            return cathetus, degree
        except ZeroDivisionError:
            return None, None
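    # A worked example of getDragInfo(), assuming the modifier anchor point
    # was (0, 0) and the cursor is now at (100, 100) in screen space:
    #   cathetus = 100, opposite = 100
    #   length   = sqrt(100**2 + 100**2) ~= 141.42
    #   theta    = 100 / 141.42 ~= 0.7071  ->  degree ~= 45.0
    # so the call returns (100, 45.0): the signed horizontal distance and
    # the drag angle, not the Euclidean length computed above.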
    def getIntersection(self, point_in_3d, vector_in_3d, fnMesh):
        """ Return the point of intersection between a view ray and a mesh.

        Args:
            point_in_3d (OpenMaya.MPoint)
            vector_in_3d (OpenMaya.MVector)

        Returns:
            OpenMaya.MFloatPoint : hitPoint
            int : faceID
        """

        hitPoint = OpenMaya.MFloatPoint()
        hitFacePtr = UTIL.asIntPtr()
        idSorted = False
        testBothDirections = False
        faceIDs = None
        triIDs = None
        accelParam = None
        hitRayParam = None
        hitTriangle = None
        hitBary1 = None
        hitBary2 = None
        maxParamPtr = 99999

        result = fnMesh.closestIntersection(
            OpenMaya.MFloatPoint(
                point_in_3d.x,
                point_in_3d.y,
                point_in_3d.z),
            OpenMaya.MFloatVector(vector_in_3d),
            faceIDs,
            triIDs,
            idSorted,
            self.SPACE,
            maxParamPtr,
            testBothDirections,
            accelParam,
            hitPoint,
            hitRayParam,
            hitFacePtr,
            hitTriangle,
            hitBary1,
            hitBary2)

        faceID = UTIL.getInt(hitFacePtr)

        if result is True:
            return hitPoint, faceID
        else:
            return None, None
    def getMatrix(self,
                  mPoint,
                  mVector,
                  targetFnMesh,
                  scale_orig,
                  matrix_orig,
                  scale_plus=1,
                  degree_plus=0.0):
        """ Return a list of values which make up a new transform matrix.

        Args:
            mPoint (OpenMaya.MPoint)
            mVector (OpenMaya.MVector)

        Returns:
            list : 16 values for the matrix
        """

        # Position of new object
        OP, faceID = self.getIntersection(mPoint, mVector, targetFnMesh)

        # If it doesn't intersect any geometry, return None
        if OP is None and faceID is None:
            return None

        qtMod = QApplication.keyboardModifiers()
        if qtMod == (self.CTRL | self.SHIFT):
            OP = getClosestVertex(OP, faceID, targetFnMesh)

        # Get normal vector and tangent vector
        if self.ROTATION is False:
            NV = OpenMaya.MVector(
                matrix_orig[4],
                matrix_orig[5],
                matrix_orig[6])
            NV.normalize()
            TV = OpenMaya.MVector(
                matrix_orig[0],
                matrix_orig[1],
                matrix_orig[2])
            TV.normalize()
        else:
            NV = self.getNormal(OP, targetFnMesh)
            TV = self.getTangent(faceID, targetFnMesh)

        # Ctrl-hold rotation: rotate the tangent around the normal by
        # degree_plus, expressed as a quaternion (x*sin, y*sin, z*sin, cos)
        if qtMod == self.CTRL:
            try:
                rad = math.radians(degree_plus)
                q1 = NV.x * math.sin(rad / 2)
                q2 = NV.y * math.sin(rad / 2)
                q3 = NV.z * math.sin(rad / 2)
                q4 = math.cos(rad / 2)
                TV = TV.rotateBy(q1, q2, q3, q4)
            except TypeError:
                pass

        # Bitangent vector
        BV = TV ^ NV
        BV.normalize()

        # 4x4 Transform Matrix
        try:
            x = scale_orig[0] * (scale_plus / 100 + 1.0)
            y = scale_orig[1] * (scale_plus / 100 + 1.0)
            z = scale_orig[2] * (scale_plus / 100 + 1.0)
            TV *= x
            NV *= y
            BV *= z
        except TypeError:
            # scale_plus may be None when getDragInfo() hit a zero length
            pass
        finally:
            matrix = [
                TV.x, TV.y, TV.z, 0,
                NV.x, NV.y, NV.z, 0,
                BV.x, BV.y, BV.z, 0,
                OP.x, OP.y, OP.z, 1
            ]

        return matrix
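    # NOTE: getMatrix() above returns a row-major 4x4 matrix in the flat
    # list form that cmds.xform(matrix=...) expects: rows 1-3 are the
    # scaled tangent, normal and bitangent vectors, row 4 is the hit
    # position. For an identity basis at the origin the list would read:
    #   [1, 0, 0, 0,
    #    0, 1, 0, 0,
    #    0, 0, 1, 0,
    #    0, 0, 0, 1]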
    def getTangent(self, faceID, targetFnMesh):
        """ Return a tangent vector of a face.

        Args:
            faceID (int)
            targetFnMesh (OpenMaya.MFnMesh)

        Returns:
            OpenMaya.MVector : tangent vector
        """

        tangentArray = OpenMaya.MFloatVectorArray()
        targetFnMesh.getFaceVertexTangents(
            faceID,
            tangentArray,
            self.SPACE)

        # Average the face-vertex tangents into a single face tangent
        numOfVtx = tangentArray.length()
        x = sum([tangentArray[i].x for i in range(numOfVtx)]) / numOfVtx
        y = sum([tangentArray[i].y for i in range(numOfVtx)]) / numOfVtx
        z = sum([tangentArray[i].z for i in range(numOfVtx)]) / numOfVtx
        tangentVector = OpenMaya.MVector()
        tangentVector.x = x
        tangentVector.y = y
        tangentVector.z = z
        tangentVector.normalize()

        return tangentVector

    def getNormal(self, pointPosition, targetFnMesh):
        """ Return a normal vector of a face.

        Args:
            pointPosition (OpenMaya.MFloatPoint)
            targetFnMesh (OpenMaya.MFnMesh)

        Returns:
            OpenMaya.MVector : normal vector
        """

        ptr_int = UTIL.asIntPtr()
        origin = OpenMaya.MPoint(pointPosition)
        normal = OpenMaya.MVector()
        targetFnMesh.getClosestNormal(
            origin,
            normal,
            self.SPACE,
            ptr_int)
        normal.normalize()

        return normal
# Creator
def cmdCreator():
    return OpenMayaMPx.asMPxPtr(DuplicateOverSurface())


def initializePlugin(mObject):
    mPlugin = OpenMayaMPx.MFnPlugin(mObject, "<NAME>")
    try:
        mPlugin.registerCommand(kPluginCmdName, cmdCreator)
        mPlugin.setVersion("0.10")
    except:
        sys.stderr.write("Failed to register command: %s\n" % kPluginCmdName)
        raise


def uninitializePlugin(mObject):
    mPlugin = OpenMayaMPx.MFnPlugin(mObject)
    try:
        mPlugin.deregisterCommand(kPluginCmdName)
    except:
        sys.stderr.write("Failed to unregister command: %s\n" % kPluginCmdName)
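# A minimal usage sketch, assuming this file is saved as
# duplicateOverSurface.py on MAYA_PLUG_IN_PATH; "source" and "target" are
# hypothetical object names. Run in the Maya script editor:
#
#   from maya import cmds
#   cmds.loadPlugin("duplicateOverSurface.py")
#   cmds.polyPlane(name="target")   # surface to duplicate onto
#   cmds.polyCube(name="source")    # object to duplicate
#   cmds.duplicateOverSurface("source", rotation=True)
#   # ...then click-drag over "target" in the viewport; middle-click exits.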
def convertTo3D(screen_x, screen_y):
    """ Return point and vector of clicked point in 3d space.

    Args:
        screen_x (int)
        screen_y (int)

    Returns:
        OpenMaya.MPoint : point_in_3d
        OpenMaya.MVector : vector_in_3d
    """

    point_in_3d = OpenMaya.MPoint()
    vector_in_3d = OpenMaya.MVector()

    OpenMayaUI.M3dView.active3dView().viewToWorld(
        int(screen_x),
        int(screen_y),
        point_in_3d,
        vector_in_3d)

    return point_in_3d, vector_in_3d


def getDagPathFromScreen(x, y):
    """ Return the dag path of the surface under a screen-space point.

    Args:
        x (int or float)
        y (int or float)

    Returns:
        dagpath : OpenMaya.MDagPath
    """

    # Select from screen
    OpenMaya.MGlobal.selectFromScreen(
        int(x),
        int(y),
        OpenMaya.MGlobal.kReplaceList,
        OpenMaya.MGlobal.kSurfaceSelectMethod)

    # Get dagpath, or return None if it fails
    tempSel = OpenMaya.MSelectionList()
    OpenMaya.MGlobal.getActiveSelectionList(tempSel)
    dagpath = OpenMaya.MDagPath()
    if tempSel.length() == 0:
        return None
    else:
        tempSel.getDagPath(0, dagpath)
        return dagpath


def getClosestVertex(point_orig, faceID, fnMesh):
    """ Return the vertex of a face closest to a given point.

    Args:
        point_orig (OpenMaya.MFloatPoint)
        faceID (int)
        fnMesh (OpenMaya.MFnMesh)

    Returns:
        closestPoint : OpenMaya.MPoint
    """

    vertexIndexArray = OpenMaya.MIntArray()
    fnMesh.getPolygonVertices(faceID, vertexIndexArray)
    basePoint = OpenMaya.MPoint(point_orig)
    closestPoint = OpenMaya.MPoint()
    length = 99999.0
    for index in vertexIndexArray:
        point = OpenMaya.MPoint()
        fnMesh.getPoint(index, point, OpenMaya.MSpace.kWorld)
        lengthVector = point - basePoint
        if lengthVector.length() < length:
            length = lengthVector.length()
            closestPoint = point

    return closestPoint
As long as", "the tool by middle click if button == 2: cmds.setToolTo('selectSuperContext') return # Get", "the new object cmds.xform(self.DUPLICATED, matrix=transformMatrix) def getNewObject(self): return cmds.duplicate(self.SOURCE, ilf=self.InstanceFlag)[0] def dragEvent(self): \"\"\"", "# intersectPoint = OpenMaya.MFloatPoint( result = fnMesh.closestIntersection( OpenMaya.MFloatPoint( point_in_3d.x, point_in_3d.y, point_in_3d.z), OpenMaya.MFloatVector(vector_in_3d), faceIDs,", "convertTo3D(screen_x, screen_y): \"\"\" Return point and vector of clicked point in 3d space.", "getNewObject(self): return cmds.duplicate(self.SOURCE, ilf=self.InstanceFlag)[0] def dragEvent(self): \"\"\" Event while dragging a 3d view", "/ length degree = math.degrees(math.acos(theta)) if opposite < 0: degree = -degree return", "transformMatrix = self.getMatrix( point_in_3d, vector_in_3d, self.TARGET_FNMESH, self.SCALE_ORIG, self.MATRIX_ORIG) if transformMatrix is None: return", "return cathetus, degree except ZeroDivisionError: return None, None def getIntersection(self, point_in_3d, vector_in_3d, fnMesh):", "0) cmds.setToolTo(self.setupDragger()) def setupDragger(self): \"\"\" Setup dragger context command \"\"\" try: cmds.deleteUI(DRAGGER) except:", "math import sys DRAGGER = \"duplicateOverSurfaceDragger\" UTIL = OpenMaya.MScriptUtil() kPluginCmdName = \"duplicateOverSurface\" kRotationFlag", "intersection.. Args: point_in_3d (OpenMaya.MPoint) vector_in_3d (OpenMaya.mVector) Returns: OpenMaya.MFloatPoint : hitPoint \"\"\" hitPoint =", "getDagPathFromScreen(x, y): \"\"\" Args: x (int or float) y (int or float) Returns:", "DRAGGER, pressCommand=self.pressEvent, dragCommand=self.dragEvent, releaseCommand=self.releaseEvent, space='screen', projection='viewPlane', undoMode='step', cursor='hand') return dragger def pressEvent(self): button", "for i in range(numOfVtx)]) / numOfVtx z = sum([tangentArray[i].z for i in range(numOfVtx)])", "/ 2) q3 = NV.z * math.sin(rad / 2) q4 = math.cos(rad /", ": faceID \"\"\" ptr_int = UTIL.asIntPtr() origin = OpenMaya.MPoint(pointPosition) normal = OpenMaya.MVector() targetFnMesh.getClosestNormal(", "= NV.x * math.sin(rad / 2) q2 = NV.y * math.sin(rad / 2)", "from PySide.QtGui import QApplication from PySide import QtCore except ImportError: from PySide2.QtWidgets import", "Args: faceID (int) mVector (OpenMaya.MVector) Returns: OpenMaya.MVector : tangent vector \"\"\" tangentArray =", "import QApplication from PySide2 import QtCore import math import sys DRAGGER = \"duplicateOverSurfaceDragger\"", "hitRayParam = None hitTriangle = None hitBary1 = None hitBary2 = None maxParamPtr", "self.SHIFT = QtCore.Qt.ShiftModifier self.CTRL = QtCore.Qt.ControlModifier def doIt(self, args): # Parse the arguments.", "pass finally: matrix = [ TV.x, TV.y, TV.z, 0, NV.x, NV.y, NV.z, 0,", "testBothDirections = False faceIDs = None triIDs = None accelParam = None hitRayParam", "PySide2 import QtCore import math import sys DRAGGER = \"duplicateOverSurfaceDragger\" UTIL = OpenMaya.MScriptUtil()", "/ 100 + 1.0) y = scale_orig[1] * (scale_plus / 100 + 1.0)", "z tangentVector.normalize() return tangentVector def getNormal(self, pointPosition, targetFnMesh): \"\"\" Return a normal vector", "of objects if targetDagPath is None: return # Get origianl scale information self.SCALE_ORIG", "# Get MFnMesh of snap target targetDagPath = getDagPathFromScreen(x, y) # If draggin", "def syntaxCreator(): syntax = OpenMaya.MSyntax() syntax.addArg(OpenMaya.MSyntax.kString) syntax.addFlag( kDummyFlag, kDummyFlagLong, 
OpenMaya.MSyntax.kBoolean) syntax.addFlag( kRotationFlag, kRotationFlagLong,", "return tangentVector def getNormal(self, pointPosition, targetFnMesh): \"\"\" Return a normal vector of a", "try: cmds.deleteUI(DRAGGER) except: pass dragger = cmds.draggerContext( DRAGGER, pressCommand=self.pressEvent, dragCommand=self.dragEvent, releaseCommand=self.releaseEvent, space='screen', projection='viewPlane',", "1.0) z = scale_orig[2] * (scale_plus / 100 + 1.0) TV *= x", "None: return # Get origianl scale information self.SCALE_ORIG = cmds.getAttr(self.SOURCE + \".scale\")[0] self.MATRIX_ORIG", "to snap self.DUPLICATED = self.getNewObject() # Reset transform of current object cmds.setAttr(self.DUPLICATED +", "else: NV = self.getNormal(OP, targetFnMesh) TV = self.getTangent(faceID, targetFnMesh) # Ctrl-hold rotation if", "= getClosestVertex(OP, faceID, targetFnMesh) # Get normal vector and tangent vector if self.ROTATION", "* (scale_plus / 100 + 1.0) z = scale_orig[2] * (scale_plus / 100", "None self.MOD_FIRST = None self.MOD_POINT = None self.SPACE = OpenMaya.MSpace.kWorld self.ROTATION = True", "dragPosition[0] y = dragPosition[1] modifier = cmds.draggerContext( DRAGGER, query=True, modifier=True) if modifier ==", "self.ANCHOR_POINT = None self.DUPLICATED = None self.SOURCE = None self.SCALE_ORIG = None self.MATRIX_ORIG", "* math.sin(rad / 2) q4 = math.cos(rad / 2) TV = TV.rotateBy(q1, q2,", "sum([tangentArray[i].z for i in range(numOfVtx)]) / numOfVtx tangentVector = OpenMaya.MVector() tangentVector.x = x", "y = dragPosition[1] modifier = cmds.draggerContext( DRAGGER, query=True, modifier=True) if modifier == \"none\":", "cathetus / length degree = math.degrees(math.acos(theta)) if opposite < 0: degree = -degree", "OpenMaya.MVector : tangent vector int : faceID \"\"\" ptr_int = UTIL.asIntPtr() origin =", "0]) cmds.refresh(currentView=True, force=True) def releaseEvent(self): self.MOD_FIRST = True def getDragInfo(self, x, y): \"\"\"", "transformMatrix is None: return # Apply new transform cmds.xform(self.DUPLICATED, matrix=transformMatrix) cmds.setAttr(self.DUPLICATED + \".shear\",", "numOfVtx = tangentArray.length() x = sum([tangentArray[i].x for i in range(numOfVtx)]) / numOfVtx y", "cmds.makeIdentity(self.DUPLICATED, apply=True, t=True) # Apply transformMatrix to the new object cmds.xform(self.DUPLICATED, matrix=transformMatrix) def", "= None maxParamPtr = 99999 # intersectPoint = OpenMaya.MFloatPoint( result = fnMesh.closestIntersection( OpenMaya.MFloatPoint(", "OpenMaya.MFloatPoint() hitFacePtr = UTIL.asIntPtr() idSorted = False testBothDirections = False faceIDs = None", "is None: return # Create new object to snap self.DUPLICATED = self.getNewObject() #", "QApplication from PySide2 import QtCore import math import sys DRAGGER = \"duplicateOverSurfaceDragger\" UTIL", "self.SCALE_ORIG = None self.MATRIX_ORIG = None self.TARGET_FNMESH = None self.MOD_FIRST = None self.MOD_POINT", "of a face. Args: faceID (int) mVector (OpenMaya.MVector) Returns: OpenMaya.MVector : tangent vector", "(OpenMaya.MFnMesh) Returns: OpenMaya.MVector : tangent vector int : faceID \"\"\" ptr_int = UTIL.asIntPtr()", "tangentVector def getNormal(self, pointPosition, targetFnMesh): \"\"\" Return a normal vector of a face.", "= math.radians(degree_plus) q1 = NV.x * math.sin(rad / 2) q2 = NV.y *", "Returns: OpenMaya.MVector : tangent vector \"\"\" tangentArray = OpenMaya.MFloatVectorArray() targetFnMesh.getFaceVertexTangents( faceID, tangentArray, self.SPACE)", "(Revision 42): # <<EMAIL>> wrote this file. 
As long as you retain this", "if self.MOD_FIRST is True: self.MOD_POINT = [x, y] # global MOD_FIRST self.MOD_FIRST =", "+ \".shear\", *[0, 0, 0]) cmds.refresh(currentView=True, force=True) def releaseEvent(self): self.MOD_FIRST = True def", "Select from screen OpenMaya.MGlobal.selectFromScreen( int(x), int(y), OpenMaya.MGlobal.kReplaceList, OpenMaya.MGlobal.kSurfaceSelectMethod) # Get dagpath, or return", "OpenMaya.MFloatPoint( result = fnMesh.closestIntersection( OpenMaya.MFloatPoint( point_in_3d.x, point_in_3d.y, point_in_3d.z), OpenMaya.MFloatVector(vector_in_3d), faceIDs, triIDs, idSorted, self.SPACE,", "i in range(numOfVtx)]) / numOfVtx y = sum([tangentArray[i].y for i in range(numOfVtx)]) /", "fnMesh.getPoint(index, point, OpenMaya.MSpace.kWorld) lengthVector = point - basePoint if lengthVector.length() < length: length", "q3 = NV.z * math.sin(rad / 2) q4 = math.cos(rad / 2) TV", "snap target targetDagPath = getDagPathFromScreen(x, y) # If draggin outside of objects if", "= 1.0 degree = 0.0 # Get new transform matrix for new object", "QApplication.keyboardModifiers() if qtMod == (self.CTRL | self.SHIFT): OP = getClosestVertex(OP, faceID, targetFnMesh) #", "---------------------------------------------------------------------------- # from maya import OpenMaya from maya import OpenMayaUI from maya import", "- start_y # Get distance using Pythagorean theorem length = math.sqrt( math.pow(cathetus, 2)", "first click of dragging if self.MOD_FIRST is True: self.MOD_POINT = [x, y] #", "int(y), OpenMaya.MGlobal.kReplaceList, OpenMaya.MGlobal.kSurfaceSelectMethod) # Get dagpath, or return None if fails tempSel =", "creator def syntaxCreator(): syntax = OpenMaya.MSyntax() syntax.addArg(OpenMaya.MSyntax.kString) syntax.addFlag( kDummyFlag, kDummyFlagLong, OpenMaya.MSyntax.kBoolean) syntax.addFlag( kRotationFlag,", "stuff. If we meet some day, # and you think this stuff is", "x = scale_orig[0] * (scale_plus / 100 + 1.0) y = scale_orig[1] *", "theorem length = math.sqrt( math.pow(cathetus, 2) + math.pow(opposite, 2)) try: theta = cathetus", "= None hitTriangle = None hitBary1 = None hitBary2 = None maxParamPtr =", "\"\"\" tangentArray = OpenMaya.MFloatVectorArray() targetFnMesh.getFaceVertexTangents( faceID, tangentArray, self.SPACE) numOfVtx = tangentArray.length() x =", "= y tangentVector.z = z tangentVector.normalize() return tangentVector def getNormal(self, pointPosition, targetFnMesh): \"\"\"", "\"\"\" Args: point_orig (OpenMaya.MFloatPoint) faceID (int) fnMesh (OpenMaya.MFnMesh) Returns: closestPoint : OpenMaya.MPoint \"\"\"", "= None hitRayParam = None hitTriangle = None hitBary1 = None hitBary2 =", "2) + math.pow(opposite, 2)) try: theta = cathetus / length degree = math.degrees(math.acos(theta))", "= None triIDs = None accelParam = None hitRayParam = None hitTriangle =", "\".translate\", *location) # Can't apply freeze to instances if self.InstanceFlag is not True:", "length, degree = self.getDragInfo(x, y) if qtModifier == self.CTRL: length = 1.0 if", "angle in screen space. 
\"\"\" start_x = self.MOD_POINT[0] start_y = self.MOD_POINT[1] end_x =", "0, 0]) location = [-i for i in cmds.xform(self.DUPLICATED, q=True, ws=True, rp=True)] cmds.setAttr(self.DUPLICATED", "objects if targetDagPath is None: return # Get origianl scale information self.SCALE_ORIG =", "\"\"\" vertexIndexArray = OpenMaya.MIntArray() fnMesh.getPolygonVertices(faceID, vertexIndexArray) basePoint = OpenMaya.MPoint(point_orig) closestPoint = OpenMaya.MPoint() length", "= argData.flagArgumentBool(kInstanceFlag, 0) cmds.setToolTo(self.setupDragger()) def setupDragger(self): \"\"\" Setup dragger context command \"\"\" try:", "self.SPACE, ptr_int) normal.normalize() return normal # Creator def cmdCreator(): return OpenMayaMPx.asMPxPtr(DuplicateOverSurface()) def initializePlugin(mObject):", "\".scale\")[0] self.MATRIX_ORIG = cmds.xform(self.SOURCE, q=True, matrix=True) self.TARGET_FNMESH = OpenMaya.MFnMesh(targetDagPath) transformMatrix = self.getMatrix( point_in_3d,", "= None self.DUPLICATED = None self.SOURCE = None self.SCALE_ORIG = None self.MATRIX_ORIG =", "UTIL.asIntPtr() origin = OpenMaya.MPoint(pointPosition) normal = OpenMaya.MVector() targetFnMesh.getClosestNormal( origin, normal, self.SPACE, ptr_int) normal.normalize()", "*= x NV *= y BV *= z except TypeError: pass finally: matrix", "None: return # Create new object to snap self.DUPLICATED = self.getNewObject() # Reset", "= math.cos(rad / 2) TV = TV.rotateBy(q1, q2, q3, q4) except TypeError: pass", "(int or float) Returns: dagpath : OpenMaya.MDagPath \"\"\" # Select from screen OpenMaya.MGlobal.selectFromScreen(", "tangent vector of a face. Args: faceID (int) mVector (OpenMaya.MVector) Returns: OpenMaya.MVector :", "mPlugin = OpenMayaMPx.MFnPlugin(mObject) try: mPlugin.deregisterCommand(kPluginCmdName) except: sys.stderr.write(\"Failed to unregister command: %s\\n\" % kPluginCmdName)", "tool by middle click if button == 2: cmds.setToolTo('selectSuperContext') return # Get clicked", "transformMatrix is None: return # Create new object to snap self.DUPLICATED = self.getNewObject()", "new transform cmds.xform(self.DUPLICATED, matrix=transformMatrix) cmds.setAttr(self.DUPLICATED + \".shear\", *[0, 0, 0]) cmds.refresh(currentView=True, force=True) def", "# Get origianl scale information self.SCALE_ORIG = cmds.getAttr(self.SOURCE + \".scale\")[0] self.MATRIX_ORIG = cmds.xform(self.SOURCE,", "= UTIL.asIntPtr() idSorted = False testBothDirections = False faceIDs = None triIDs =", "None triIDs = None accelParam = None hitRayParam = None hitTriangle = None", "OpenMaya.MVector : vector_in_3d \"\"\" point_in_3d = OpenMaya.MPoint() vector_in_3d = OpenMaya.MVector() OpenMayaUI.M3dView.active3dView().viewToWorld( int(screen_x), int(screen_y),", "\"\"\" Return a list of values which consist a new transform matrix. Args:", "lengthVector = point - basePoint if lengthVector.length() < length: length = lengthVector.length() closestPoint", "DRAGGER = \"duplicateOverSurfaceDragger\" UTIL = OpenMaya.MScriptUtil() kPluginCmdName = \"duplicateOverSurface\" kRotationFlag = \"-r\" kRotationFlagLong", "if modifier == \"none\": self.MOD_FIRST = True qtModifier = QApplication.keyboardModifiers() if qtModifier ==", "y): \"\"\" Get distance and angle in screen space. 
\"\"\" start_x = self.MOD_POINT[0]", "intersect to any geometries, return None if OP is None and faceID is", "vector int : faceID \"\"\" ptr_int = UTIL.asIntPtr() origin = OpenMaya.MPoint(pointPosition) normal =", "start_y = self.MOD_POINT[1] end_x = x end_y = y cathetus = end_x -", "= OpenMaya.MVector() tangentVector.x = x tangentVector.y = y tangentVector.z = z tangentVector.normalize() return", "= cmds.getAttr(self.SOURCE + \".scale\")[0] self.MATRIX_ORIG = cmds.xform(self.SOURCE, q=True, matrix=True) self.TARGET_FNMESH = OpenMaya.MFnMesh(targetDagPath) transformMatrix", "tangent vector int : faceID \"\"\" ptr_int = UTIL.asIntPtr() origin = OpenMaya.MPoint(pointPosition) normal", "= 0.0 # Convert point_in_3d, vector_in_3d = convertTo3D( self.MOD_POINT[0], self.MOD_POINT[1]) else: point_in_3d, vector_in_3d", "= dragPosition[1] modifier = cmds.draggerContext( DRAGGER, query=True, modifier=True) if modifier == \"none\": self.MOD_FIRST", "vector_in_3d = convertTo3D(x, y) # Get MFnMesh of snap target targetDagPath = getDagPathFromScreen(x,", "think this stuff is worth it, you can buy me a beer in" ]
[ "__future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('measure_mate',", "from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('measure_mate', '0011_auto_20160225_1305'), ('measure_mate', '0014_auto_20160224_0207'),", "migrations class Migration(migrations.Migration): dependencies = [ ('measure_mate', '0011_auto_20160225_1305'), ('measure_mate', '0014_auto_20160224_0207'), ] operations =", "utf-8 -*- # Generated by Django 1.9.1 on 2016-02-26 06:25 from __future__ import", "django.db import migrations class Migration(migrations.Migration): dependencies = [ ('measure_mate', '0011_auto_20160225_1305'), ('measure_mate', '0014_auto_20160224_0207'), ]", "Django 1.9.1 on 2016-02-26 06:25 from __future__ import unicode_literals from django.db import migrations", "unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('measure_mate', '0011_auto_20160225_1305'), ('measure_mate',", "-*- # Generated by Django 1.9.1 on 2016-02-26 06:25 from __future__ import unicode_literals", "1.9.1 on 2016-02-26 06:25 from __future__ import unicode_literals from django.db import migrations class", "# Generated by Django 1.9.1 on 2016-02-26 06:25 from __future__ import unicode_literals from", "Migration(migrations.Migration): dependencies = [ ('measure_mate', '0011_auto_20160225_1305'), ('measure_mate', '0014_auto_20160224_0207'), ] operations = [ ]", "06:25 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies =", "from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [", "import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('measure_mate', '0011_auto_20160225_1305'),", "-*- coding: utf-8 -*- # Generated by Django 1.9.1 on 2016-02-26 06:25 from", "on 2016-02-26 06:25 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration):", "class Migration(migrations.Migration): dependencies = [ ('measure_mate', '0011_auto_20160225_1305'), ('measure_mate', '0014_auto_20160224_0207'), ] operations = [", "by Django 1.9.1 on 2016-02-26 06:25 from __future__ import unicode_literals from django.db import", "Generated by Django 1.9.1 on 2016-02-26 06:25 from __future__ import unicode_literals from django.db", "coding: utf-8 -*- # Generated by Django 1.9.1 on 2016-02-26 06:25 from __future__", "2016-02-26 06:25 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies", "# -*- coding: utf-8 -*- # Generated by Django 1.9.1 on 2016-02-26 06:25", "import migrations class Migration(migrations.Migration): dependencies = [ ('measure_mate', '0011_auto_20160225_1305'), ('measure_mate', '0014_auto_20160224_0207'), ] operations" ]
[ "self.send('ping') except BaseException: with self.state_lock: self._remote_closed = True def force_closed(self): with self.state_lock: return", "return compile( r\"\"\" (?P<exec>\\![a-z]+) | ((?P<exec>\\![a-z]+)\\s(?P<execparam>[a-zA-Z0-9.\\/\\\\\\-\\_\\s]+)\\s*) | (?P<func>[a-z]+) | ((?P<func>[a-z]+)\\s(?P<params>\\+?[a-zA-Z0-9.\\/\\:\\\\\\-\\_\\s]+)\\s*) \"\"\" ) def", "from prompt_toolkit.layout.menus import CompletionsMenu from prompt_toolkit.lexers import SimpleLexer from prompt_toolkit.output import ColorDepth from", "def apphelp(mpd, _param): output = '' output += \"=== Shell Commands ===\\n\" for", "\"seek\", \"seekcur\", \"seekid\", \"sendmessage\", \"setvol\", \"shuffle\", \"single\", \"stats\", \"status\", \"sticker\", \"stop\", \"subscribe\", \"swap\",", "\"\\n\" mpd.local_echo(output) def resetterm(mpd, _param): APP.reset() mpd.local_echo(\"Terminal reset!\") def listscripts(mpd, _param): output =", "echobuffer (DRAW): {mpd.peek_echobuffer()}\") new_text += local_output if recv_output != '' or local_output !=", "datetime.now().isoformat()}] echobuffer: {mpd.peek_echobuffer()}\") #################################### # SECTION: READBACK COLLECT #################################### sockdebug_print( f\"[{ datetime.now().isoformat()}] mask:", "FormattedTextControl( client_settings ), height=1, style=\"class:title\", ), linedown, debugzone, Window( BufferControl(buffer=output_field), get_line_prefix=get_line_prefix, wrap_lines=False, style=\"class:output\"),", "return msg def pop_echo(self) -> str: with self._io_lock: return self._echobuffer.pop() def runscript(self, param):", "= Window( BufferControl(buffer=echodbg_buffer), height=1, get_line_prefix=get_echodbg_prefix, wrap_lines=False, style=\"class:debug\") debugzone = HSplit([]) if args.debug: debugzone", "= FloatContainer( content=HSplit( [ Window( FormattedTextControl( intro_text ), height=1, style=\"class:title\", ), Window( FormattedTextControl(", "ColorDepth from prompt_toolkit.styles import Style from prompt_toolkit.widgets import SearchToolbar, TextArea selector = selectors.SelectSelector()", "in events: callback = key.data callback(key.fileobj, mask) def send(self, message: str): with self._io_lock:", "= mpcscript.read() return self.send(data) def disconnect(self, *argv): with self.socket_lock: try: self.socket.sendall(bytes('close', 'utf-8')) except", "selectors.EVENT_READ: self._receive(connection) if mask & selectors.EVENT_WRITE: self._transmit(connection) def _receive(self, connection): chunks = []", "\"outputs\", \"outputset\", \"partition\", \"password\", \"pause\", \"ping\", \"play\", \"playid\", \"playlist\", \"playlistadd\", \"playlistclear\", \"playlistdelete\", \"playlistfind\",", "===' files = list(SCRIPT_HOME.glob(\"*.ncs\")) for file in files: output += ' - '", "DEBUG import selectors import socket import sys import threading import time import selectors", "APP = None mpdcmds = [ \"add\", \"addid\", \"addtagid\", \"albumart\", \"channels\", \"clear\", \"clearerror\",", "threading.Timer( self.next_call - time.time(), self._run) self._timer.start() self.is_running = True def stop(self): self._timer.cancel() self.is_running", "{ \"exec\": lambda s, x: s.runscript(x), \"scripts\": lambda s, x: listscripts(s, x), \"help\":", "import Path from threading import Lock from typing import List from prompt_toolkit import", "(DRAW): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer (DRAW): {mpd.peek_echobuffer()}\") new_text += recv_output if", 
"BufferControl(buffer=output_field), get_line_prefix=get_line_prefix, wrap_lines=False, style=\"class:output\"), lineup, input_field, search_field, linedown, lineup, Window( FormattedTextControl( help_text ),", "netpoll() end #################################### autoping = RepeatedTimer(3.0, lambda x: x.ping_unchecked(), mpd) autopoll = RepeatedTimer(1.0,", "as ex: print(\"Connection closed by remote: {}\".format(ex)) finally: self.socket.shutdown(socket.SHUT_WR) self.socket.close() def ping(self) ->", "linedown]) container = FloatContainer( content=HSplit( [ Window( FormattedTextControl( intro_text ), height=1, style=\"class:title\", ),", "self._timer = None self.interval = interval self.function = function self.args = args self.kwargs", "floats=[ Float( xcursor=True, ycursor=True, content=CompletionsMenu(max_height=32, scroll_offset=1), ) ], style=\"class:base\" ) def netdebug_print(msg): if", "{baseA1}\", \"title\": f\"bg:{base02} #90A4AE\", \"c1\": \"#FF5722\", \"c2\": \"#D4E157\", \"c3\": \"#9575CD\", \"c4\": \"#4CAF50\", \"c5\":", "\"playlistid\", \"playlistinfo\", \"playlistmove\", \"playlistsearch\", \"plchanges\", \"plchangesposid\", \"previous\", \"prio\", \"prioid\", \"random\", \"rangeid\", \"readcomments\", \"readmessages\",", "gen_style() -> Style: base00 = '#000000' base01 = '#202020' base02 = '#303030' base03", "threading import Lock from typing import List from prompt_toolkit import HTML from prompt_toolkit", "_receive(self, connection): chunks = [] with self._io_lock: data = connection.recv(RECV_BUFFER_SIZE) if data: chunks.append(data)", ") ], style=\"class:base\" ) def netdebug_print(msg): if not DEBUGAPP: return netdbg_buffer.document = Document(", "onpageup(_event): output_field.cursor_position -= 500 @kb.add(\"pagedown\") def onpagedown(_event): output_field.cursor_position += 500 @kb.add(\"c-c\") @kb.add(\"c-q\") def", "import (Float, FloatContainer, HSplit, Window) from prompt_toolkit.layout.controls import (Buffer, BufferControl, FormattedTextControl) from prompt_toolkit.layout.layout", "output += ' ' * spaces + l + '\\n' return output def", "), floats=[ Float( xcursor=True, ycursor=True, content=CompletionsMenu(max_height=32, scroll_offset=1), ) ], style=\"class:base\" ) def netdebug_print(msg):", "SearchToolbar, TextArea selector = selectors.SelectSelector() loop = asyncio.SelectorEventLoop(selector) asyncio.set_event_loop(loop) RECV_BUFFER_SIZE = 4096 SCRIPT_HOME", "| (?P<func>[a-z]+) | ((?P<func>[a-z]+)\\s(?P<params>\\+?[a-zA-Z0-9.\\/\\:\\\\\\-\\_\\s]+)\\s*) \"\"\" ) def mpchelp(mpd, _param): output = '' output", "except BaseException as ex: print(\"Connection closed by remote: {}\".format(ex)) finally: self.socket.shutdown(socket.SHUT_WR) self.socket.close() def", "= accept # The key bindings. 
kb = KeyBindings() @kb.add(\"pageup\") def onpageup(_event): output_field.cursor_position", "Echo enabled: <c4>{str(not NOECHO)}</c4>\") help_text = HTML(f\"Exit: <c4>[Control-C]</c4> | Scroll up: <c4>[PageUp]</c4> |", "\"\"\"Pressing Ctrl-Q or Ctrl-C will exit the user interface.\"\"\" event.app.exit() #### # Here", "port)) data = self.socket.recv(RECV_BUFFER_SIZE) self.initmsg = str(data, 'utf-8') self.socket_lock = Lock() self.state_lock =", "the main loop sort of #### def netpoll(): if not mpd: return sockdebug_print(", "for c in internalcmds.keys(): output += str(c) + \"\\n\" mpd.local_echo(output) def resetterm(mpd, _param):", "FormattedTextControl( intro_text ), height=1, style=\"class:title\", ), Window( FormattedTextControl( client_settings ), height=1, style=\"class:title\", ),", "nettickwnd = Window( BufferControl(buffer=netdbg_buffer), height=1, get_line_prefix=get_netdbg_prefix, wrap_lines=False, style=\"class:debug\") socketwnd = Window( BufferControl(buffer=socketdbg_buffer), height=1,", "msg def get_line_prefix(lineno, wrap_count): return HTML('<linetoken><b>»</b></linetoken> ') def get_netdbg_prefix(lineno, wrap_count): return HTML('<linetoken>NETTICK: </linetoken>", "debugnotice = Window( FormattedTextControl( HTML(\"<b>== Debug Info ==</b>\") ), height=1, style=\"class:title\", ) nettickwnd", "connection.sendall(bytes(command, 'utf-8')) def local_echo(self, message): with self._io_lock: self._echobuffer.append(message) def close(self): with self.socket_lock: self.socket.shutdown(socket.SHUT_WR)", "self.socket.shutdown(socket.SHUT_WR) self.socket.close() def ping(self) -> bool: self.send('ping') def ping_unchecked(self): try: self.send('ping') except BaseException:", "from prompt_toolkit.lexers import SimpleLexer from prompt_toolkit.output import ColorDepth from prompt_toolkit.styles import Style from", "don't get written into the output view (default: 0)\", type=bool, default=False, required=False) parser.add_argument(\"-b\",", "time import selectors from datetime import datetime from pathlib import Path from threading", "self._inbuffer.append(str(b''.join(chunks), 'utf-8')) def _transmit(self, connection): with self._io_lock: while len(self._outbuffer) > 0: msg =", ") def echodbg_print(msg): if not DEBUGAPP: return echodbg_buffer.document = Document( text=msg, cursor_position=0 )", "self.kwargs = kwargs self.is_running = False self.next_call = time.time() self.start() def _run(self): self.is_running", "= '#b31e8d' base0F = '#7a2d00' baseA0 = '#242424' baseA1 = '#06A191' return Style.from_dict(", "sockdebug_print( f\"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}\") mpd.poll() netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer: input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") echodbg_print(", "get broken into multiple buffer if the size isn't big enough or your", "self.send(data) def disconnect(self, *argv): with self.socket_lock: try: self.socket.sendall(bytes('close', 'utf-8')) except BaseException as ex:", "echobuffer: {mpd.peek_echobuffer()}\") #################################### # SECTION: READBACK COLLECT #################################### sockdebug_print( f\"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}\")", "\"rename\", \"repeat\", \"replay_gain_mode\", \"replay_gain_status\", \"rescan\", \"rm\", \"save\", \"search\", \"searchadd\", \"searchaddpl\", \"seek\", \"seekcur\", \"seekid\",", "500 @kb.add(\"c-c\") @kb.add(\"c-q\") def _(event): \"\"\"Pressing Ctrl-Q or Ctrl-C will exit the user", "\"line\": base03, 
\"base\": f\"bg:{baseA0} {base05}\", \"toolbar\": f\"bg:{base01} {baseA1}\", \"title\": f\"bg:{base02} #90A4AE\", \"c1\": \"#FF5722\",", "+= ' ' * spaces + l + '\\n' return output def accept(buff):", "be waited. (default: 3)\", type=int, default=3, required=False) parser.add_argument(\"-n\", \"--no-echo\", help=\"Own commands don't get", "not None: mpd.send(f\"password {args.secret}\") application.run() autoping.stop() autopoll.stop() mpd.disconnect() if __name__ == '__main__': main()", "two. (default: 4096)\", type=int, default=4096, required=False) args = parser.parse_args() DEBUGAPP = args.debug alive_tick", "view (default: 0)\", type=bool, default=False, required=False) parser.add_argument(\"-b\", \"--buffer-size\", help=\"The size of one TCP", "((?P<exec>\\![a-z]+)\\s(?P<execparam>[a-zA-Z0-9.\\/\\\\\\-\\_\\s]+)\\s*) | (?P<func>[a-z]+) | ((?P<func>[a-z]+)\\s(?P<params>\\+?[a-zA-Z0-9.\\/\\:\\\\\\-\\_\\s]+)\\s*) \"\"\" ) def mpchelp(mpd, _param): output = ''", "\"c2\": \"#D4E157\", \"c3\": \"#9575CD\", \"c4\": \"#4CAF50\", \"c5\": \"#9C27B0\" }) def invalid_input(msg=\"Invalid command\"): return", ") lineup = Window(height=1, char=\"▁\", style=\"class:line\") linedown = Window(height=1, char=\"▔\", style=\"class:line\") debugnotice =", "int): self.selector = selectors.DefaultSelector() self._inbuffer = [] self._outbuffer = [] self._echobuffer = []", "= [ \"add\", \"addid\", \"addtagid\", \"albumart\", \"channels\", \"clear\", \"clearerror\", \"cleartagid\", \"close\", \"commands\", \"config\",", "_param): output = '' output += \"=== Shell Commands ===\\n\" for c in", "output def accept(buff): if mpd.force_closed(): application.exit(result=\"Connection reset by peer\") try: match = grammar.match(buff.text)", "interval, function, *args, **kwargs): self._timer = None self.interval = interval self.function = function", "= '#202020' base02 = '#303030' base03 = '#505050' base04 = '#909090' base05 =", "type=int, default=4096, required=False) args = parser.parse_args() DEBUGAPP = args.debug alive_tick = args.alive_tick port", "port self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket.connect((hostname, port)) data = self.socket.recv(RECV_BUFFER_SIZE) self.initmsg = str(data,", "mpd.pop_echo() if echomsg: isonow = datetime.now().isoformat(timespec='seconds') local_output += f'\\n{echomsg}\\n' echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer", "lexers={ \"func\": SimpleLexer(\"class:function\"), \"params\": SimpleLexer(\"class:parameter\"), \"exec\": SimpleLexer(\"class:exec\"), \"execparam\": SimpleLexer(\"class:execparam\"), }, ) commands =", "NOECHO = False APP = None mpdcmds = [ \"add\", \"addid\", \"addtagid\", \"albumart\",", "lineup, nettickwnd, socketwnd, echownd, linedown]) container = FloatContainer( content=HSplit( [ Window( FormattedTextControl( intro_text", "'utf-8')) def local_echo(self, message): with self._io_lock: self._echobuffer.append(message) def close(self): with self.socket_lock: self.socket.shutdown(socket.SHUT_WR) self.socket.close()", "), ] ), floats=[ Float( xcursor=True, ycursor=True, content=CompletionsMenu(max_height=32, scroll_offset=1), ) ], style=\"class:base\" )", "'utf-8') self.socket_lock = Lock() self.state_lock = Lock() self._io_lock = Lock() self.socket.setblocking(False) self.selector.register( self.socket,", "(Buffer, BufferControl, FormattedTextControl) from prompt_toolkit.layout.layout import Layout from prompt_toolkit.layout.menus import CompletionsMenu from prompt_toolkit.lexers", "\"random\", \"rangeid\", 
\"readcomments\", \"readmessages\", \"readpicture\", \"rename\", \"repeat\", \"replay_gain_mode\", \"replay_gain_status\", \"rescan\", \"rm\", \"save\", \"search\",", "\"save\", \"search\", \"searchadd\", \"searchaddpl\", \"seek\", \"seekcur\", \"seekid\", \"sendmessage\", \"setvol\", \"shuffle\", \"single\", \"stats\", \"status\",", "\"scripts\": lambda s, x: listscripts(s, x), \"help\": lambda s, x: apphelp(s, x), \"mpchelp\":", "= Document( text=msg, cursor_position=0 ) def echodbg_print(msg): if not DEBUGAPP: return echodbg_buffer.document =", "output += \"=== MPC Commands ===\\n\" for c in mpdcmds: output += str(c)", "\"#FF5722\", \"c2\": \"#D4E157\", \"c3\": \"#9575CD\", \"c4\": \"#4CAF50\", \"c5\": \"#9C27B0\" }) def invalid_input(msg=\"Invalid command\"):", "*args, **kwargs): self._timer = None self.interval = interval self.function = function self.args =", "alive_tick = args.alive_tick port = args.port print(f\"Connecting to {args.host}@{port}...\") mpd = MPDClient(args.host, port)", "- time.time(), self._run) self._timer.start() self.is_running = True def stop(self): self._timer.cancel() self.is_running = False", "{}\\n\\tLine: {}\".format( e, tb.tb_frame, tb.tb_lasti, tb.tb_lineno)) input_field.accept_handler = accept # The key bindings.", "print from prompt_toolkit.application import Application, application from prompt_toolkit.completion import WordCompleter from prompt_toolkit.contrib.regular_languages.compiler import", "x.ping_unchecked(), mpd) autopoll = RepeatedTimer(1.0, netpoll) # Run application. application = Application( layout=Layout(container,", "Style from prompt_toolkit.widgets import SearchToolbar, TextArea selector = selectors.SelectSelector() loop = asyncio.SelectorEventLoop(selector) asyncio.set_event_loop(loop)", "internalcmds.keys(): output += str(c) + \"\\n\" mpd.local_echo(output) def resetterm(mpd, _param): APP.reset() mpd.local_echo(\"Terminal reset!\")", "\"rm\", \"save\", \"search\", \"searchadd\", \"searchaddpl\", \"seek\", \"seekcur\", \"seekid\", \"sendmessage\", \"setvol\", \"shuffle\", \"single\", \"stats\",", "send(self, message: str): with self._io_lock: self._outbuffer.append(message) def onsocketready(self, connection, mask): self.dbg_lastmask = mask", "base04 = '#909090' base05 = '#bfbfbf' base06 = '#e0e0e0' base07 = '#ffffff' base08", "recv_output if local_output != '': netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer (DRAW): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") echodbg_print(", "'mpdscripts' DEBUGAPP = False NOECHO = False APP = None mpdcmds = [", "base03 = '#505050' base04 = '#909090' base05 = '#bfbfbf' base06 = '#e0e0e0' base07", "default=3, required=False) parser.add_argument(\"-n\", \"--no-echo\", help=\"Own commands don't get written into the output view", "= socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket.connect((hostname, port)) data = self.socket.recv(RECV_BUFFER_SIZE) self.initmsg = str(data, 'utf-8') self.socket_lock", "#################################### sockdebug_print( f\"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}\") local_output = '' while mpd.echo_available(): echodbg_print( f\"[{", "0: msg = self._outbuffer.pop() command = str(msg + '\\n') connection.sendall(bytes(command, 'utf-8')) def local_echo(self,", "self._io_lock: msg = self._inbuffer.pop() if str(msg).strip() == 'OK': return None else: return msg", "text=msg, cursor_position=0 ) def sockdebug_print(msg): if not DEBUGAPP: return socketdbg_buffer.document = Document( text=msg,", 
"grammar.match(buff.text) if match: params = match.variables() execcmd = params.get(\"exec\") if execcmd is not", "of two. (default: 4096)\", type=int, default=4096, required=False) args = parser.parse_args() DEBUGAPP = args.debug", "def force_closed(self): with self.state_lock: return self._remote_closed def poll(self): events = self.selector.select() for key,", "new_text = output_field.text if recv_output != '': netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer (DRAW): input({mpd.peek_inbuffer()})", "prompt_toolkit.key_binding import KeyBindings from prompt_toolkit.layout.containers import (Float, FloatContainer, HSplit, Window) from prompt_toolkit.layout.controls import", "wrap_lines=False, style=\"class:debug\") socketwnd = Window( BufferControl(buffer=socketdbg_buffer), height=1, get_line_prefix=get_socketdbg_prefix, wrap_lines=False, style=\"class:debug\") echownd = Window(", "execcmd = params.get(\"exec\") if execcmd is not None: params = params.get(\"execparam\") funcptr =", "enable_page_navigation_bindings=False, color_depth=ColorDepth.TRUE_COLOR ) APP = application if args.secret is not None: mpd.send(f\"password {args.secret}\")", "l in text.splitlines(): output += ' ' * spaces + l + '\\n'", "not self.is_running: self.next_call += self.interval self._timer = threading.Timer( self.next_call - time.time(), self._run) self._timer.start()", "* spaces + l + '\\n' return output def accept(buff): if mpd.force_closed(): application.exit(result=\"Connection", "as mpcscript: data = mpcscript.read() return self.send(data) def disconnect(self, *argv): with self.socket_lock: try:", "| TCP buffer: <c4>{RECV_BUFFER_SIZE}</c4> | Echo enabled: <c4>{str(not NOECHO)}</c4>\") help_text = HTML(f\"Exit: <c4>[Control-C]</c4>", "= '#f8ca12' base0B = '#FF6236' base0C = '#00aabb' base0D = '#0e5a94' base0E =", "{mpd.peek_echobuffer()}\") new_text += local_output if recv_output != '' or local_output != '': output_field.document", "required=False) parser.add_argument(\"-d\", \"--debug\", help=\"Show internal debug info (default: 0)\", type=bool, default=False, required=False) parser.add_argument(\"-a\",", "from prompt_toolkit.contrib.regular_languages.compiler import compile from prompt_toolkit.contrib.regular_languages.completion import \\ GrammarCompleter from prompt_toolkit.contrib.regular_languages.lexer import GrammarLexer", "selectors.EVENT_WRITE, self.onsocketready) self._remote_closed = False self.dbg_lastmask = 0x0 def data_available(self) -> bool: with", "mpd shell scripts in \"{SCRIPT_HOME}\" ===' files = list(SCRIPT_HOME.glob(\"*.ncs\")) for file in files:", "-> bool: self.send('ping') def ping_unchecked(self): try: self.send('ping') except BaseException: with self.state_lock: self._remote_closed =", "print(f\"Connecting to {args.host}@{port}...\") mpd = MPDClient(args.host, port) grammar = create_grammar() intro_text = HTML(f\"Connected", "f'\\n\\n----\\n=> Total: {len(files)}' mpd.local_echo(output) def gen_style() -> Style: base00 = '#000000' base01 =", "base0E = '#b31e8d' base0F = '#7a2d00' baseA0 = '#242424' baseA1 = '#06A191' return", "connection): with self._io_lock: while len(self._outbuffer) > 0: msg = self._outbuffer.pop() command = str(msg", "\"toggleoutput\", \"unmount\", \"unsubscribe\", \"update\", \"urlhandlers\", \"volume\"] internalcmds = { \"exec\": lambda s, x:", "= WordCompleter(commands) intern_keywords = WordCompleter(internalcmds.keys()) completer = GrammarCompleter( grammar, { \"func\": keywords, \"exec\":", "= Buffer() netdbg_buffer = Buffer() socketdbg_buffer 
= Buffer() echodbg_buffer = Buffer() input_field =", "peek_inbuffer(self) -> int: return len(self._inbuffer) def peek_outbuffer(self) -> int: return len(self._outbuffer) def peek_echobuffer(self)", "in \"{SCRIPT_HOME}\" ===' files = list(SCRIPT_HOME.glob(\"*.ncs\")) for file in files: output += '", "keywords, \"exec\": intern_keywords }, ) search_field = SearchToolbar() # For reverse search. output_field", "SimpleLexer(\"class:execparam\"), }, ) commands = [] commands.extend(mpdcmds) keywords = WordCompleter(commands) intern_keywords = WordCompleter(internalcmds.keys())", "base0C = '#00aabb' base0D = '#0e5a94' base0E = '#b31e8d' base0F = '#7a2d00' baseA0", "output += ' - ' + file.name + \"\\n\" output += f'\\n\\n----\\n=> Total:", "= kwargs self.is_running = False self.next_call = time.time() self.start() def _run(self): self.is_running =", "of your MPD instance\") parser.add_argument(\"-p\", \"--port\", help=\"The port on which MPD is running", "> 0 def echo_available(self) -> bool: with self._io_lock: return len(self._echobuffer) > 0 def", "') def main(): global DEBUGAPP, NOECHO, APP parser = argparse.ArgumentParser() parser.add_argument(\"host\", help=\"The host", "mpd.force_closed(): application.exit(result=\"Connection reset by peer\") try: match = grammar.match(buff.text) if match: params =", "base0D = '#0e5a94' base0E = '#b31e8d' base0F = '#7a2d00' baseA0 = '#242424' baseA1", "mpdcmds: mpd.local_echo(invalid_input()) else: mpd.local_echo(buff.text) mpd.send(buff.text) if buff.text == \"close\": application.exit() else: mpd.local_echo(invalid_input()) except", "= application if args.secret is not None: mpd.send(f\"password {args.secret}\") application.run() autoping.stop() autopoll.stop() mpd.disconnect()", "self.socket.shutdown(socket.SHUT_WR) self.socket.close() def create_grammar(): return compile( r\"\"\" (?P<exec>\\![a-z]+) | ((?P<exec>\\![a-z]+)\\s(?P<execparam>[a-zA-Z0-9.\\/\\\\\\-\\_\\s]+)\\s*) | (?P<func>[a-z]+) |", "') def get_echodbg_prefix(lineno, wrap_count): return HTML('<linetoken>SYSECHO:</linetoken> ') def main(): global DEBUGAPP, NOECHO, APP", "= args.port print(f\"Connecting to {args.host}@{port}...\") mpd = MPDClient(args.host, port) grammar = create_grammar() intro_text", "\"mount\", \"move\", \"moveid\", \"moveoutput\", \"newpartition\", \"next\", \"notcommands\", \"outputs\", \"outputset\", \"partition\", \"password\", \"pause\", \"ping\",", "height=1, style=\"class:title\", ) nettickwnd = Window( BufferControl(buffer=netdbg_buffer), height=1, get_line_prefix=get_netdbg_prefix, wrap_lines=False, style=\"class:debug\") socketwnd =", "indent(f'\\n[{isonow}] {message}\\n') netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer (DRAIN): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") #################################### # SECTION: READBACK", "key, mask in events: callback = key.data callback(key.fileobj, mask) def send(self, message: str):", "\"partition\", \"password\", \"pause\", \"ping\", \"play\", \"playid\", \"playlist\", \"playlistadd\", \"playlistclear\", \"playlistdelete\", \"playlistfind\", \"playlistid\", \"playlistinfo\",", "\"stop\", \"subscribe\", \"swap\", \"swapid\", \"tagtypes\", \"toggleoutput\", \"unmount\", \"unsubscribe\", \"update\", \"urlhandlers\", \"volume\"] internalcmds =", "# The key bindings. 
kb = KeyBindings() @kb.add(\"pageup\") def onpageup(_event): output_field.cursor_position -= 500", "= params.get(\"exec\") if execcmd is not None: params = params.get(\"execparam\") funcptr = internalcmds.get(", "def get_netdbg_prefix(lineno, wrap_count): return HTML('<linetoken>NETTICK: </linetoken> ') def get_socketdbg_prefix(lineno, wrap_count): return HTML('<linetoken>SOCKET:</linetoken> ')", "with self._io_lock: while len(self._outbuffer) > 0: msg = self._outbuffer.pop() command = str(msg +", "if local_output != '': netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer (DRAW): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") echodbg_print( f\"[{", "def runscript(self, param): params = param.split(' ') file = params[0] with open(SCRIPT_HOME /", "message might get broken into multiple buffer if the size isn't big enough", "COLLECT #################################### sockdebug_print( f\"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}\") recv_output = '' while mpd.data_available(): message", "#!/usr/bin/env python3 import argparse import asyncio from re import DEBUG import selectors import", "from prompt_toolkit.widgets import SearchToolbar, TextArea selector = selectors.SelectSelector() loop = asyncio.SelectorEventLoop(selector) asyncio.set_event_loop(loop) RECV_BUFFER_SIZE", "exit the user interface.\"\"\" event.app.exit() #### # Here happens the main loop sort", "x), \"mpchelp\": lambda s, x: mpchelp(s, x), \"reset\": lambda s, x: resetterm(s,x) }", "0)\", type=bool, default=False, required=False) parser.add_argument(\"-b\", \"--buffer-size\", help=\"The size of one TCP buffer. A", "style=\"class:debug\") echownd = Window( BufferControl(buffer=echodbg_buffer), height=1, get_line_prefix=get_echodbg_prefix, wrap_lines=False, style=\"class:debug\") debugzone = HSplit([]) if", "debugzone = HSplit([]) if args.debug: debugzone = HSplit([ lineup, debugnotice, lineup, nettickwnd, socketwnd,", "WordCompleter(commands) intern_keywords = WordCompleter(internalcmds.keys()) completer = GrammarCompleter( grammar, { \"func\": keywords, \"exec\": intern_keywords", "0x0 def data_available(self) -> bool: with self._io_lock: return len(self._inbuffer) > 0 def echo_available(self)", "===\\n\" for c in mpdcmds: output += str(c) + \"\\n\" mpd.local_echo(output) def apphelp(mpd,", "\"playlistclear\", \"playlistdelete\", \"playlistfind\", \"playlistid\", \"playlistinfo\", \"playlistmove\", \"playlistsearch\", \"plchanges\", \"plchangesposid\", \"previous\", \"prio\", \"prioid\", \"random\",", "if buff.text == \"close\": application.exit() else: mpd.local_echo(invalid_input()) except BaseException as e: tb =", "{mpd.peek_echobuffer()}\") #################################### # SECTION: READBACK COLLECT #################################### sockdebug_print( f\"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}\") recv_output", "from threading import Lock from typing import List from prompt_toolkit import HTML from", "with self._io_lock: self._echobuffer.append(message) def close(self): with self.socket_lock: self.socket.shutdown(socket.SHUT_WR) self.socket.close() def create_grammar(): return compile(", "| ((?P<func>[a-z]+)\\s(?P<params>\\+?[a-zA-Z0-9.\\/\\:\\\\\\-\\_\\s]+)\\s*) \"\"\" ) def mpchelp(mpd, _param): output = '' output += \"===", "s, x: listscripts(s, x), \"help\": lambda s, x: apphelp(s, x), \"mpchelp\": lambda s,", "buff.text == \"close\": application.exit() else: mpd.local_echo(invalid_input()) except BaseException as e: tb = sys.exc_info()[2]", "happens the 
main loop sort of #### def netpoll(): if not mpd: return", "help=\"Own commands don't get written into the output view (default: 0)\", type=bool, default=False,", "\"reset\": lambda s, x: resetterm(s,x) } class RepeatedTimer(object): def __init__(self, interval, function, *args,", "prompt_toolkit.contrib.regular_languages.compiler import compile from prompt_toolkit.contrib.regular_languages.completion import \\ GrammarCompleter from prompt_toolkit.contrib.regular_languages.lexer import GrammarLexer from", "= internalcmds.get( execcmd[1:], lambda s, x: invalid_input(\"Unknown internal command\")) funcptr(mpd, params) else: cmd", "\"listneighbors\", \"listpartitions\", \"listplaylist\", \"listplaylistinfo\", \"listplaylists\", \"load\", \"lsinfo\", \"mixrampdb\", \"mixrampdelay\", \"mount\", \"move\", \"moveid\", \"moveoutput\",", "output += f'\\n\\n----\\n=> Total: {len(files)}' mpd.local_echo(output) def gen_style() -> Style: base00 = '#000000'", "self.start() self.function(*self.args, **self.kwargs) def start(self): if not self.is_running: self.next_call += self.interval self._timer =", "= '' output += \"=== MPC Commands ===\\n\" for c in mpdcmds: output", "\"cleartagid\", \"close\", \"commands\", \"config\", \"consume\", \"count\", \"crossfade\", \"currentsong\", \"decoders\", \"delete\", \"deleteid\", \"delpartition\", \"disableoutput\",", "height=1, get_line_prefix=get_netdbg_prefix, wrap_lines=False, style=\"class:debug\") socketwnd = Window( BufferControl(buffer=socketdbg_buffer), height=1, get_line_prefix=get_socketdbg_prefix, wrap_lines=False, style=\"class:debug\") echownd", "}, ) commands = [] commands.extend(mpdcmds) keywords = WordCompleter(commands) intern_keywords = WordCompleter(internalcmds.keys()) completer", "\"#4CAF50\", \"c5\": \"#9C27B0\" }) def invalid_input(msg=\"Invalid command\"): return msg def get_line_prefix(lineno, wrap_count): return", "is not None: mpd.send(f\"password {args.secret}\") application.run() autoping.stop() autopoll.stop() mpd.disconnect() if __name__ == '__main__':", "help=\"Initialize connection with this password (default: None)\", type=str, required=False) parser.add_argument(\"-d\", \"--debug\", help=\"Show internal", "new_text += recv_output if local_output != '': netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer (DRAW): input({mpd.peek_inbuffer()})", "\"help\": lambda s, x: apphelp(s, x), \"mpchelp\": lambda s, x: mpchelp(s, x), \"reset\":", "\"mixrampdelay\", \"mount\", \"move\", \"moveid\", \"moveoutput\", \"newpartition\", \"next\", \"notcommands\", \"outputs\", \"outputset\", \"partition\", \"password\", \"pause\",", "with open(SCRIPT_HOME / file) as mpcscript: data = mpcscript.read() return self.send(data) def disconnect(self,", "(default: 0)\", type=bool, default=False, required=False) parser.add_argument(\"-a\", \"--alive-tick\", help=\"How many seconds between a keep", "Window( FormattedTextControl( intro_text ), height=1, style=\"class:title\", ), Window( FormattedTextControl( client_settings ), height=1, style=\"class:title\",", "prompt_toolkit.application import Application, application from prompt_toolkit.completion import WordCompleter from prompt_toolkit.contrib.regular_languages.compiler import compile from", "\"replay_gain_mode\", \"replay_gain_status\", \"rescan\", \"rm\", \"save\", \"search\", \"searchadd\", \"searchaddpl\", \"seek\", \"seekcur\", \"seekid\", \"sendmessage\", \"setvol\",", "'#000000' base01 = '#202020' base02 = '#303030' base03 = '#505050' base04 = '#909090'", "commands don't get written into 
the output view (default: 0)\", type=bool, default=False, required=False)", "True def stop(self): self._timer.cancel() self.is_running = False class MPDClient(object): def __init__(self, hostname: str,", "default=False, required=False) parser.add_argument(\"-a\", \"--alive-tick\", help=\"How many seconds between a keep a live should", "\"linetoken\": base0C, \"line\": base03, \"base\": f\"bg:{baseA0} {base05}\", \"toolbar\": f\"bg:{base01} {baseA1}\", \"title\": f\"bg:{base02} #90A4AE\",", "\"listmounts\", \"listneighbors\", \"listpartitions\", \"listplaylist\", \"listplaylistinfo\", \"listplaylists\", \"load\", \"lsinfo\", \"mixrampdb\", \"mixrampdelay\", \"mount\", \"move\", \"moveid\",", "\"getfingerprint\", \"idle\", \"kill\", \"list\", \"listall\", \"listallinfo\", \"listfiles\", \"listmounts\", \"listneighbors\", \"listpartitions\", \"listplaylist\", \"listplaylistinfo\", \"listplaylists\",", "grammar, { \"func\": keywords, \"exec\": intern_keywords }, ) search_field = SearchToolbar() # For", "+ l + '\\n' return output def accept(buff): if mpd.force_closed(): application.exit(result=\"Connection reset by", "style=\"class:title\", ) nettickwnd = Window( BufferControl(buffer=netdbg_buffer), height=1, get_line_prefix=get_netdbg_prefix, wrap_lines=False, style=\"class:debug\") socketwnd = Window(", "The key bindings. kb = KeyBindings() @kb.add(\"pageup\") def onpageup(_event): output_field.cursor_position -= 500 @kb.add(\"pagedown\")", "\"playlistadd\", \"playlistclear\", \"playlistdelete\", \"playlistfind\", \"playlistid\", \"playlistinfo\", \"playlistmove\", \"playlistsearch\", \"plchanges\", \"plchangesposid\", \"previous\", \"prio\", \"prioid\",", "Layout from prompt_toolkit.layout.menus import CompletionsMenu from prompt_toolkit.lexers import SimpleLexer from prompt_toolkit.output import ColorDepth", "f\"bg:{base01} {base04}\", \"linetoken\": base0C, \"line\": base03, \"base\": f\"bg:{baseA0} {base05}\", \"toolbar\": f\"bg:{base01} {baseA1}\", \"title\":", "self._echobuffer.pop() def runscript(self, param): params = param.split(' ') file = params[0] with open(SCRIPT_HOME", "= [] self._outbuffer = [] self._echobuffer = [] self.server = hostname self.port =", "= False class MPDClient(object): def __init__(self, hostname: str, port: int): self.selector = selectors.DefaultSelector()", "\"sticker\", \"stop\", \"subscribe\", \"swap\", \"swapid\", \"tagtypes\", \"toggleoutput\", \"unmount\", \"unsubscribe\", \"update\", \"urlhandlers\", \"volume\"] internalcmds", "self.next_call = time.time() self.start() def _run(self): self.is_running = False self.start() self.function(*self.args, **self.kwargs) def", "\"config\", \"consume\", \"count\", \"crossfade\", \"currentsong\", \"decoders\", \"delete\", \"deleteid\", \"delpartition\", \"disableoutput\", \"enableoutput\", \"find\", \"findadd\",", "self._timer.start() self.is_running = True def stop(self): self._timer.cancel() self.is_running = False class MPDClient(object): def", "color_depth=ColorDepth.TRUE_COLOR ) APP = application if args.secret is not None: mpd.send(f\"password {args.secret}\") application.run()", "new_text += local_output if recv_output != '' or local_output != '': output_field.document =", "f\"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}\") recv_output = '' while mpd.data_available(): message = mpd.pop_message() if", "host of your MPD instance\") parser.add_argument(\"-p\", \"--port\", help=\"The port on which MPD is", "= [] commands.extend(mpdcmds) keywords = WordCompleter(commands) intern_keywords = 
WordCompleter(list(internalcmds.keys()))
    completer = GrammarCompleter(
        grammar,
        {
            "func": keywords,
            "exec": intern_keywords,
        },
    )
recv_output != '' or local_output != '':
            output_field.document = Document(
                text=new_text,
                cursor_position=len(new_text),
            )
argparse.ArgumentParser()
    parser.add_argument("host", help="The host of your MPD instance")
(default: 3)\", type=int, default=3, required=False) parser.add_argument(\"-n\", \"--no-echo\",", "into the output view (default: 0)\", type=bool, default=False, required=False) parser.add_argument(\"-b\", \"--buffer-size\", help=\"The size", "\"seekid\", \"sendmessage\", \"setvol\", \"shuffle\", \"single\", \"stats\", \"status\", \"sticker\", \"stop\", \"subscribe\", \"swap\", \"swapid\", \"tagtypes\",", "\"mixrampdb\", \"mixrampdelay\", \"mount\", \"move\", \"moveid\", \"moveoutput\", \"newpartition\", \"next\", \"notcommands\", \"outputs\", \"outputset\", \"partition\", \"password\",", "peek_echobuffer(self) -> int: return len(self._echobuffer) def pop_message(self) -> str: with self._io_lock: msg =", "def start(self): if not self.is_running: self.next_call += self.interval self._timer = threading.Timer( self.next_call -", "\"close\", \"commands\", \"config\", \"consume\", \"count\", \"crossfade\", \"currentsong\", \"decoders\", \"delete\", \"deleteid\", \"delpartition\", \"disableoutput\", \"enableoutput\",", "echobuffer (DRAW): {mpd.peek_echobuffer()}\") new_text += recv_output if local_output != '': netdebug_print( f\"[{ datetime.now().isoformat()}]", "peer\") try: match = grammar.match(buff.text) if match: params = match.variables() execcmd = params.get(\"exec\")", "shell scripts in \"{SCRIPT_HOME}\" ===' files = list(SCRIPT_HOME.glob(\"*.ncs\")) for file in files: output", "local_output != '': netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer (DRAW): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") echodbg_print( f\"[{ datetime.now().isoformat()}]", "from prompt_toolkit.styles import Style from prompt_toolkit.widgets import SearchToolbar, TextArea selector = selectors.SelectSelector() loop", "4096)\", type=int, default=4096, required=False) args = parser.parse_args() DEBUGAPP = args.debug alive_tick = args.alive_tick", "import datetime from pathlib import Path from threading import Lock from typing import", "mask in events: callback = key.data callback(key.fileobj, mask) def send(self, message: str): with", "\"--alive-tick\", help=\"How many seconds between a keep a live should be waited. 
\"playlistadd\", \"playlistclear\",", "search_field = SearchToolbar() # For reverse search. output_field = Buffer() netdbg_buffer = Buffer()", "self.selector.register( self.socket, selectors.EVENT_READ | selectors.EVENT_WRITE, self.onsocketready) self._remote_closed = False self.dbg_lastmask = 0x0 def", "return echodbg_buffer.document = Document( text=msg, cursor_position=0 ) def indent(text: str, spaces=2): output =", "base09 = '#f29333' base0A = '#f8ca12' base0B = '#FF6236' base0C = '#00aabb' base0D", "-> Style: base00 = '#000000' base01 = '#202020' base02 = '#303030' base03 =", "cursor_position=0 ) def echodbg_print(msg): if not DEBUGAPP: return echodbg_buffer.document = Document( text=msg, cursor_position=0", "Buffer() input_field = TextArea( height=1, lexer=lexer, completer=completer, prompt=\"❯ \", style=\"class:input\", multiline=False, wrap_lines=False, search_field=search_field,", "c in mpdcmds: output += str(c) + \"\\n\" mpd.local_echo(output) def apphelp(mpd, _param): output", "from prompt_toolkit.layout.controls import (Buffer, BufferControl, FormattedTextControl) from prompt_toolkit.layout.layout import Layout from prompt_toolkit.layout.menus import", "grammar = create_grammar() intro_text = HTML(f\"Connected to: <c1>{mpd.server}@{mpd.port}</c1> ❯ <c2>{mpd.initmsg}</c2>\") client_settings = HTML(f\"Keep", "{mpd.dbg_lastmask}\") mpd.poll() netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer: input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer: {mpd.peek_echobuffer()}\")", "f'=== Available mpd shell scripts in \"{SCRIPT_HOME}\" ===' files = list(SCRIPT_HOME.glob(\"*.ncs\")) for file", "application from prompt_toolkit.completion import WordCompleter from prompt_toolkit.contrib.regular_languages.compiler import compile from prompt_toolkit.contrib.regular_languages.completion import \\", "accept(buff): if mpd.force_closed(): application.exit(result=\"Connection reset by peer\") try: match = grammar.match(buff.text) if match:", "__init__(self, interval, function, *args, **kwargs): self._timer = None self.interval = interval self.function =", "= self._outbuffer.pop() command = str(msg + '\\n') connection.sendall(bytes(command, 'utf-8')) def local_echo(self, message): with", "\"playlistfind\", \"playlistid\", \"playlistinfo\", \"playlistmove\", \"playlistsearch\", \"plchanges\", \"plchangesposid\", \"previous\", \"prio\", \"prioid\", \"random\", \"rangeid\", \"readcomments\",", "= Window( BufferControl(buffer=socketdbg_buffer), height=1, get_line_prefix=get_socketdbg_prefix, wrap_lines=False, style=\"class:debug\") echownd = Window( BufferControl(buffer=echodbg_buffer), height=1, get_line_prefix=get_echodbg_prefix,", "time.time(), self._run) self._timer.start() self.is_running = True def stop(self): self._timer.cancel() self.is_running = False class", "port) grammar = create_grammar() intro_text = HTML(f\"Connected to: <c1>{mpd.server}@{mpd.port}</c1> ❯ <c2>{mpd.initmsg}</c2>\") client_settings =", "self._echobuffer = [] self.server = hostname self.port = port self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "READBACK ECHOs #################################### sockdebug_print( f\"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}\") local_output = '' while mpd.echo_available():", "= Window(height=1, char=\"▁\", style=\"class:line\") linedown = Window(height=1, char=\"▔\", style=\"class:line\") debugnotice = Window( FormattedTextControl(", "<c4>[PageDown]</c4> | App command prefix: 
<c4>[!]</c4> <b>(try !help)</b>")
    lexer = GrammarLexer(
        grammar,
        lexers={
(default: 3)\", type=int, default=3, required=False) parser.add_argument(\"-n\", \"--no-echo\", help=\"Own", "= sys.exc_info()[2] mpd.local_echo(\"\\n\\nError: {}\\n\\tFrame: {}\\n\\tInstruction: {}\\n\\tLine: {}\".format( e, tb.tb_frame, tb.tb_lasti, tb.tb_lineno)) input_field.accept_handler =", "| selectors.EVENT_WRITE, self.onsocketready) self._remote_closed = False self.dbg_lastmask = 0x0 def data_available(self) -> bool:", "def resetterm(mpd, _param): APP.reset() mpd.local_echo(\"Terminal reset!\") def listscripts(mpd, _param): output = f'=== Available", "'#b31e8d' base0F = '#7a2d00' baseA0 = '#242424' baseA1 = '#06A191' return Style.from_dict( {", "threading import time import selectors from datetime import datetime from pathlib import Path", "lineup, Window( FormattedTextControl( help_text ), height=1, style=\"class:toolbar\", ), ] ), floats=[ Float( xcursor=True,", "\"listfiles\", \"listmounts\", \"listneighbors\", \"listpartitions\", \"listplaylist\", \"listplaylistinfo\", \"listplaylists\", \"load\", \"lsinfo\", \"mixrampdb\", \"mixrampdelay\", \"mount\", \"move\",", "\"idle\", \"kill\", \"list\", \"listall\", \"listallinfo\", \"listfiles\", \"listmounts\", \"listneighbors\", \"listpartitions\", \"listplaylist\", \"listplaylistinfo\", \"listplaylists\", \"load\",", "HTML(f\"Keep alive tick: <c4>{alive_tick}</c4> | TCP buffer: <c4>{RECV_BUFFER_SIZE}</c4> | Echo enabled: <c4>{str(not NOECHO)}</c4>\")", "| Scroll up: <c4>[PageUp]</c4> | Scroll down: <c4>[PageDown]</c4> | App command prefix: <c4>[!]</c4>", "\"searchaddpl\", \"seek\", \"seekcur\", \"seekid\", \"sendmessage\", \"setvol\", \"shuffle\", \"single\", \"stats\", \"status\", \"sticker\", \"stop\", \"subscribe\",", "output_field.text if recv_output != '': netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer (DRAW): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") echodbg_print(", "r\"\"\" (?P<exec>\\![a-z]+) | ((?P<exec>\\![a-z]+)\\s(?P<execparam>[a-zA-Z0-9.\\/\\\\\\-\\_\\s]+)\\s*) | (?P<func>[a-z]+) | ((?P<func>[a-z]+)\\s(?P<params>\\+?[a-zA-Z0-9.\\/\\:\\\\\\-\\_\\s]+)\\s*) \"\"\" ) def mpchelp(mpd, _param):", "= True def force_closed(self): with self.state_lock: return self._remote_closed def poll(self): events = self.selector.select()", "self.dbg_lastmask = mask if mask & selectors.EVENT_READ: self._receive(connection) if mask & selectors.EVENT_WRITE: self._transmit(connection)", "!= '': output_field.document = Document( text=new_text, cursor_position=len(new_text)) application.invalidate() #################################### # netpoll() end ####################################", "focused_element=input_field), key_bindings=kb, style=gen_style(), mouse_support=True, full_screen=True, enable_page_navigation_bindings=False, color_depth=ColorDepth.TRUE_COLOR ) APP = application if args.secret", "\"previous\", \"prio\", \"prioid\", \"random\", \"rangeid\", \"readcomments\", \"readmessages\", \"readpicture\", \"rename\", \"repeat\", \"replay_gain_mode\", \"replay_gain_status\", \"rescan\",", "for c in mpdcmds: output += str(c) + \"\\n\" mpd.local_echo(output) def apphelp(mpd, _param):", "execcmd is not None: params = params.get(\"execparam\") funcptr = internalcmds.get( execcmd[1:], lambda s,", "import WordCompleter from prompt_toolkit.contrib.regular_languages.compiler import compile from prompt_toolkit.contrib.regular_languages.completion import \\ GrammarCompleter from prompt_toolkit.contrib.regular_languages.lexer", "class MPDClient(object): def __init__(self, hostname: str, port: int): self.selector = 
selectors.DefaultSelector()
        self._inbuffer = []
content=CompletionsMenu(max_height=32, scroll_offset=1),
            )
        ],
        style="class:base",
    )

    def netdebug_print(msg):
        if not DEBUGAPP:
            return
        netdbg_buffer.document = Document(
            text=msg, cursor_position=0
        )
len(self._inbuffer)

    def peek_outbuffer(self) -> int:
        return len(self._outbuffer)

    def peek_echobuffer(self) -> int:
        return len(self._echobuffer)
None

mpdcmds = [
    "add", "addid", "addtagid",
kb = KeyBindings()

    @kb.add("pageup")
    def onpageup(_event):
        output_field.cursor_position -= 500

    @kb.add("pagedown")
    def onpagedown(_event):
        output_field.cursor_position += 500
output_field = Buffer()
    netdbg_buffer = Buffer()
    socketdbg_buffer = Buffer()
    echodbg_buffer = Buffer()
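# --- Illustrative sketch, not part of the original client ------------------
# These Buffers back read-only panes; their text is replaced wholesale by
# assigning a fresh Document, which is the same pattern netpoll() uses to
# append output (`buf` is a throwaway demo name):
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.document import Document

buf = Buffer()
appended = buf.text + "\nOK"
buf.document = Document(text=appended, cursor_position=len(appended))
# ----------------------------------------------------------------------------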
((?P<exec>\\![a-z]+)\\s(?P<execparam>[a-zA-Z0-9.\\/\\\\\\-\\_\\s]+)\\s*) | (?P<func>[a-z]+) | ((?P<func>[a-z]+)\\s(?P<params>\\+?[a-zA-Z0-9.\\/\\:\\\\\\-\\_\\s]+)\\s*) \"\"\" ) def mpchelp(mpd, _param): output =", "(default: None)\", type=str, required=False) parser.add_argument(\"-d\", \"--debug\", help=\"Show internal debug info (default: 0)\", type=bool,", "params = param.split(' ') file = params[0] with open(SCRIPT_HOME / file) as mpcscript:", "type=bool, default=False, required=False) parser.add_argument(\"-a\", \"--alive-tick\", help=\"How many seconds between a keep a live", "base02 = '#303030' base03 = '#505050' base04 = '#909090' base05 = '#bfbfbf' base06", "text=msg, cursor_position=0 ) def echodbg_print(msg): if not DEBUGAPP: return echodbg_buffer.document = Document( text=msg,", "echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer (DRAIN): {mpd.peek_echobuffer()}\") #################################### # SECTION WRITE TO TTY ####################################", "\"decoders\", \"delete\", \"deleteid\", \"delpartition\", \"disableoutput\", \"enableoutput\", \"find\", \"findadd\", \"getfingerprint\", \"idle\", \"kill\", \"list\", \"listall\",", "def __init__(self, hostname: str, port: int): self.selector = selectors.DefaultSelector() self._inbuffer = [] self._outbuffer", "WordCompleter(internalcmds.keys()) completer = GrammarCompleter( grammar, { \"func\": keywords, \"exec\": intern_keywords }, ) search_field", "by remote: {}\".format(ex)) finally: self.socket.shutdown(socket.SHUT_WR) self.socket.close() def ping(self) -> bool: self.send('ping') def ping_unchecked(self):", "datetime.now().isoformat()}] netbuffer (DRAIN): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") #################################### # SECTION: READBACK ECHOs #################################### sockdebug_print( f\"[{", "BufferControl, FormattedTextControl) from prompt_toolkit.layout.layout import Layout from prompt_toolkit.layout.menus import CompletionsMenu from prompt_toolkit.lexers import", "layout=Layout(container, focused_element=input_field), key_bindings=kb, style=gen_style(), mouse_support=True, full_screen=True, enable_page_navigation_bindings=False, color_depth=ColorDepth.TRUE_COLOR ) APP = application if", "import Document from prompt_toolkit.key_binding import KeyBindings from prompt_toolkit.layout.containers import (Float, FloatContainer, HSplit, Window)", "default=4096, required=False) args = parser.parse_args() DEBUGAPP = args.debug alive_tick = args.alive_tick port =", "= False NOECHO = False APP = None mpdcmds = [ \"add\", \"addid\",", "[] with self._io_lock: data = connection.recv(RECV_BUFFER_SIZE) if data: chunks.append(data) self._inbuffer.append(str(b''.join(chunks), 'utf-8')) def _transmit(self,", "\"delete\", \"deleteid\", \"delpartition\", \"disableoutput\", \"enableoutput\", \"find\", \"findadd\", \"getfingerprint\", \"idle\", \"kill\", \"list\", \"listall\", \"listallinfo\",", "'#202020' base02 = '#303030' base03 = '#505050' base04 = '#909090' base05 = '#bfbfbf'", "output = '' output += \"=== MPC Commands ===\\n\" for c in mpdcmds:", "Commands ===\\n\" for c in mpdcmds: output += str(c) + \"\\n\" mpd.local_echo(output) def", "def main(): global DEBUGAPP, NOECHO, APP parser = argparse.ArgumentParser() parser.add_argument(\"host\", help=\"The host of", "with the power of two. 
(default: 4096)\", type=int, default=4096, required=False) args = parser.parse_args()", "\"prioid\", \"random\", \"rangeid\", \"readcomments\", \"readmessages\", \"readpicture\", \"rename\", \"repeat\", \"replay_gain_mode\", \"replay_gain_status\", \"rescan\", \"rm\", \"save\",", "= WordCompleter(internalcmds.keys()) completer = GrammarCompleter( grammar, { \"func\": keywords, \"exec\": intern_keywords }, )", "application.invalidate() #################################### # netpoll() end #################################### autoping = RepeatedTimer(3.0, lambda x: x.ping_unchecked(), mpd)", "self.onsocketready) self._remote_closed = False self.dbg_lastmask = 0x0 def data_available(self) -> bool: with self._io_lock:", "except BaseException: with self.state_lock: self._remote_closed = True def force_closed(self): with self.state_lock: return self._remote_closed", "tb.tb_frame, tb.tb_lasti, tb.tb_lineno)) input_field.accept_handler = accept # The key bindings. kb = KeyBindings()", "mpd.local_echo(\"Terminal reset!\") def listscripts(mpd, _param): output = f'=== Available mpd shell scripts in", "input_field.accept_handler = accept # The key bindings. kb = KeyBindings() @kb.add(\"pageup\") def onpageup(_event):", "def sockdebug_print(msg): if not DEBUGAPP: return socketdbg_buffer.document = Document( text=msg, cursor_position=0 ) def", "datetime import datetime from pathlib import Path from threading import Lock from typing", "return HTML('<linetoken>SYSECHO:</linetoken> ') def main(): global DEBUGAPP, NOECHO, APP parser = argparse.ArgumentParser() parser.add_argument(\"host\",", "prompt_toolkit.styles import Style from prompt_toolkit.widgets import SearchToolbar, TextArea selector = selectors.SelectSelector() loop =", "SECTION: READBACK COLLECT #################################### sockdebug_print( f\"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}\") recv_output = '' while", "\"listpartitions\", \"listplaylist\", \"listplaylistinfo\", \"listplaylists\", \"load\", \"lsinfo\", \"mixrampdb\", \"mixrampdelay\", \"mount\", \"move\", \"moveid\", \"moveoutput\", \"newpartition\",", "e: tb = sys.exc_info()[2] mpd.local_echo(\"\\n\\nError: {}\\n\\tFrame: {}\\n\\tInstruction: {}\\n\\tLine: {}\".format( e, tb.tb_frame, tb.tb_lasti, tb.tb_lineno))", "\"repeat\", \"replay_gain_mode\", \"replay_gain_status\", \"rescan\", \"rm\", \"save\", \"search\", \"searchadd\", \"searchaddpl\", \"seek\", \"seekcur\", \"seekid\", \"sendmessage\",", "False self.next_call = time.time() self.start() def _run(self): self.is_running = False self.start() self.function(*self.args, **self.kwargs)", "] ), floats=[ Float( xcursor=True, ycursor=True, content=CompletionsMenu(max_height=32, scroll_offset=1), ) ], style=\"class:base\" ) def", "mpcscript.read() return self.send(data) def disconnect(self, *argv): with self.socket_lock: try: self.socket.sendall(bytes('close', 'utf-8')) except BaseException", "lexer = GrammarLexer( grammar, lexers={ \"func\": SimpleLexer(\"class:function\"), \"params\": SimpleLexer(\"class:parameter\"), \"exec\": SimpleLexer(\"class:exec\"), \"execparam\": SimpleLexer(\"class:execparam\"),", "peek_outbuffer(self) -> int: return len(self._outbuffer) def peek_echobuffer(self) -> int: return len(self._echobuffer) def pop_message(self)", "pathlib import Path from threading import Lock from typing import List from prompt_toolkit", "#################################### # netpoll() end #################################### autoping = RepeatedTimer(3.0, lambda x: x.ping_unchecked(), mpd) autopoll", 
get_line_prefix=get_line_prefix,
                wrap_lines=False,
                style="class:output"),
            lineup,
            input_field,
            search_field,
            linedown,
            lineup,
            Window(
                FormattedTextControl(help_text),
                height=1,
                style="class:toolbar",
            ),
A message might get broken into multiple buffers if the size isn't
output_field = Buffer() netdbg_buffer = Buffer() socketdbg_buffer = Buffer() echodbg_buffer", "base06 = '#e0e0e0' base07 = '#ffffff' base08 = '#eb008a' base09 = '#f29333' base0A", "prompt_toolkit.layout.containers import (Float, FloatContainer, HSplit, Window) from prompt_toolkit.layout.controls import (Buffer, BufferControl, FormattedTextControl) from", "return self.send(data) def disconnect(self, *argv): with self.socket_lock: try: self.socket.sendall(bytes('close', 'utf-8')) except BaseException as", "style=\"class:debug\") debugzone = HSplit([]) if args.debug: debugzone = HSplit([ lineup, debugnotice, lineup, nettickwnd,", "return netdbg_buffer.document = Document( text=msg, cursor_position=0 ) def sockdebug_print(msg): if not DEBUGAPP: return", "of one TCP buffer. A message might get broken into multiple buffer if", "prompt_toolkit.contrib.regular_languages.completion import \\ GrammarCompleter from prompt_toolkit.contrib.regular_languages.lexer import GrammarLexer from prompt_toolkit.document import Document from", "output += str(c) + \"\\n\" mpd.local_echo(output) def apphelp(mpd, _param): output = '' output", "int: return len(self._inbuffer) def peek_outbuffer(self) -> int: return len(self._outbuffer) def peek_echobuffer(self) -> int:", "wrap_count): return HTML('<linetoken>NETTICK: </linetoken> ') def get_socketdbg_prefix(lineno, wrap_count): return HTML('<linetoken>SOCKET:</linetoken> ') def get_echodbg_prefix(lineno,", "\"setvol\", \"shuffle\", \"single\", \"stats\", \"status\", \"sticker\", \"stop\", \"subscribe\", \"swap\", \"swapid\", \"tagtypes\", \"toggleoutput\", \"unmount\",", "get_socketdbg_prefix(lineno, wrap_count): return HTML('<linetoken>SOCKET:</linetoken> ') def get_echodbg_prefix(lineno, wrap_count): return HTML('<linetoken>SYSECHO:</linetoken> ') def main():", "= False self.next_call = time.time() self.start() def _run(self): self.is_running = False self.start() self.function(*self.args,", "= MPDClient(args.host, port) grammar = create_grammar() intro_text = HTML(f\"Connected to: <c1>{mpd.server}@{mpd.port}</c1> ❯ <c2>{mpd.initmsg}</c2>\")", "f\"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}\") local_output = '' while mpd.echo_available(): echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer", "connection, mask): self.dbg_lastmask = mask if mask & selectors.EVENT_READ: self._receive(connection) if mask &", "# SECTION: READBACK ECHOs #################################### sockdebug_print( f\"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}\") local_output = ''", "= self.selector.select() for key, mask in events: callback = key.data callback(key.fileobj, mask) def", "= '#000000' base01 = '#202020' base02 = '#303030' base03 = '#505050' base04 =", "= False self.start() self.function(*self.args, **self.kwargs) def start(self): if not self.is_running: self.next_call += self.interval", "\"subscribe\", \"swap\", \"swapid\", \"tagtypes\", \"toggleoutput\", \"unmount\", \"unsubscribe\", \"update\", \"urlhandlers\", \"volume\"] internalcmds = {", "self.state_lock: self._remote_closed = True def force_closed(self): with self.state_lock: return self._remote_closed def poll(self): events", "baseA0 = '#242424' baseA1 = '#06A191' return Style.from_dict( { \"function\": base0D, \"parameter\": base08,", "{}\".format( e, tb.tb_frame, tb.tb_lasti, tb.tb_lineno)) input_field.accept_handler = accept # The key bindings. 
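
# A short illustration of the dispatch convention above (a sketch only; the
# script file name is hypothetical). Every handler takes the client and the
# raw parameter string, which is exactly what accept() further down passes:
#
#   internalcmds["help"](mpd, None)          # echoes the shell command list
#   internalcmds["exec"](mpd, "demo.ncs")    # queues ~/mpdscripts/demo.ncs
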
class RepeatedTimer(object):
    """Re-arming threading.Timer that compensates for drift by scheduling
    each run relative to the originally planned next_call time."""

    def __init__(self, interval, function, *args, **kwargs):
        self._timer = None
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.is_running = False
        self.next_call = time.time()
        self.start()

    def _run(self):
        self.is_running = False
        self.start()
        self.function(*self.args, **self.kwargs)

    def start(self):
        if not self.is_running:
            self.next_call += self.interval
            self._timer = threading.Timer(
                self.next_call - time.time(), self._run)
            self._timer.start()
            self.is_running = True

    def stop(self):
        self._timer.cancel()
        self.is_running = False


class MPDClient(object):
    """Non-blocking MPD connection. send() and local_echo() only queue;
    poll() moves queued bytes through the socket via selector callbacks."""

    def __init__(self, hostname: str, port: int):
        self.selector = selectors.DefaultSelector()
        self._inbuffer = []
        self._outbuffer = []
        self._echobuffer = []
        self.server = hostname
        self.port = port
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((hostname, port))
        data = self.socket.recv(RECV_BUFFER_SIZE)
        self.initmsg = str(data, 'utf-8')
        self.socket_lock = Lock()
        self.state_lock = Lock()
        self._io_lock = Lock()
        self.socket.setblocking(False)
        self.selector.register(
            self.socket,
            selectors.EVENT_READ | selectors.EVENT_WRITE,
            self.onsocketready)
        self._remote_closed = False
        self.dbg_lastmask = 0x0

    def data_available(self) -> bool:
        with self._io_lock:
            return len(self._inbuffer) > 0

    def echo_available(self) -> bool:
        with self._io_lock:
            return len(self._echobuffer) > 0

    def peek_inbuffer(self) -> int:
        return len(self._inbuffer)

    def peek_outbuffer(self) -> int:
        return len(self._outbuffer)

    def peek_echobuffer(self) -> int:
        return len(self._echobuffer)

    def pop_message(self) -> str:
        with self._io_lock:
            msg = self._inbuffer.pop()
            if str(msg).strip() == 'OK':
                return None
            else:
                return msg

    def pop_echo(self) -> str:
        with self._io_lock:
            return self._echobuffer.pop()

    def runscript(self, param):
        params = param.split(' ')
        file = params[0]
        with open(SCRIPT_HOME / file) as mpcscript:
            data = mpcscript.read()
        return self.send(data)

    def disconnect(self, *argv):
        with self.socket_lock:
            try:
                self.socket.sendall(bytes('close', 'utf-8'))
            except BaseException as ex:
                print("Connection closed by remote: {}".format(ex))
            finally:
                self.socket.shutdown(socket.SHUT_WR)
                self.socket.close()

    def ping(self):
        self.send('ping')

    def ping_unchecked(self):
        try:
            self.send('ping')
        except BaseException:
            with self.state_lock:
                self._remote_closed = True

    def force_closed(self):
        with self.state_lock:
            return self._remote_closed

    def poll(self):
        events = self.selector.select()
        for key, mask in events:
            callback = key.data
            callback(key.fileobj, mask)

    def send(self, message: str):
        with self._io_lock:
            self._outbuffer.append(message)

    def onsocketready(self, connection, mask):
        self.dbg_lastmask = mask
        if mask & selectors.EVENT_READ:
            self._receive(connection)
        if mask & selectors.EVENT_WRITE:
            self._transmit(connection)

    def _receive(self, connection):
        chunks = []
        with self._io_lock:
            data = connection.recv(RECV_BUFFER_SIZE)
            if data:
                chunks.append(data)
                self._inbuffer.append(str(b''.join(chunks), 'utf-8'))

    def _transmit(self, connection):
        with self._io_lock:
            while len(self._outbuffer) > 0:
                msg = self._outbuffer.pop()
                command = str(msg + '\n')
                connection.sendall(bytes(command, 'utf-8'))

    def local_echo(self, message):
        with self._io_lock:
            self._echobuffer.append(message)

    def close(self):
        with self.socket_lock:
            self.socket.shutdown(socket.SHUT_WR)
            self.socket.close()
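
# Sketch of the client's I/O contract when used without the UI (hypothetical
# standalone use; host and port are assumptions). send() only queues, so
# nothing touches the wire until poll() dispatches the selector callbacks,
# and a reply may take more than one poll() to arrive:
#
#   client = MPDClient("localhost", 6600)
#   client.send("status")
#   client.poll()                      # flush outbuffer / collect replies
#   while client.data_available():
#       print(client.pop_message())
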
def create_grammar():
    return compile(
        r"""
        (?P<exec>\![a-z]+) |
        ((?P<exec>\![a-z]+)\s(?P<execparam>[a-zA-Z0-9.\/\\\-\_\s]+)\s*) |
        (?P<func>[a-z]+) |
        ((?P<func>[a-z]+)\s(?P<params>\+?[a-zA-Z0-9.\/\:\\\-\_\s]+)\s*)
        """
    )
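
# What the grammar accepts, by example (inputs are illustrative): "status"
# and "play 3" bind <func>/<params> and are sent to the MPD server, while
# "!help" and "!exec demo.ncs" bind <exec>/<execparam> and are dispatched
# locally through internalcmds (accept() strips the leading '!').
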
def mpchelp(mpd, _param):
    output = ''
    output += "=== MPC Commands ===\n"
    for c in mpdcmds:
        output += str(c) + "\n"
    mpd.local_echo(output)


def apphelp(mpd, _param):
    output = ''
    output += "=== Shell Commands ===\n"
    for c in internalcmds.keys():
        output += str(c) + "\n"
    mpd.local_echo(output)


def resetterm(mpd, _param):
    APP.reset()
    mpd.local_echo("Terminal reset!")


def listscripts(mpd, _param):
    output = f'=== Available mpd shell scripts in "{SCRIPT_HOME}" ===\n'
    files = list(SCRIPT_HOME.glob("*.ncs"))
    for file in files:
        output += ' - ' + file.name + "\n"
    output += f'\n\n----\n=> Total: {len(files)}'
    mpd.local_echo(output)


def gen_style() -> Style:
    base00 = '#000000'
    base01 = '#202020'
    base02 = '#303030'
    base03 = '#505050'
    base04 = '#909090'
    base05 = '#bfbfbf'
    base06 = '#e0e0e0'
    base07 = '#ffffff'
    base08 = '#eb008a'
    base09 = '#f29333'
    base0A = '#f8ca12'
    base0B = '#FF6236'
    base0C = '#00aabb'
    base0D = '#0e5a94'
    base0E = '#b31e8d'
    base0F = '#7a2d00'
    baseA0 = '#242424'
    baseA1 = '#06A191'
    return Style.from_dict(
        {
            "function": base0D,
            "parameter": base08,
            "exec": base0E,
            "execparam": base09,
            "trailing-input": base0F,
            "output": base0B,
            "debug": f"bg:{base01} {base0A}",
            "input": f"bg:{base01} {base04}",
            "linetoken": base0C,
            "line": base03,
            "base": f"bg:{baseA0} {base05}",
            "toolbar": f"bg:{base01} {baseA1}",
            "title": f"bg:{base02} #90A4AE",
            "c1": "#FF5722",
            "c2": "#D4E157",
            "c3": "#9575CD",
            "c4": "#4CAF50",
            "c5": "#9C27B0"
        })


def invalid_input(msg="Invalid command"):
    return msg


def get_line_prefix(lineno, wrap_count):
    return HTML('<linetoken><b>»</b></linetoken> ')


def get_netdbg_prefix(lineno, wrap_count):
    return HTML('<linetoken>NETTICK: </linetoken> ')


def get_socketdbg_prefix(lineno, wrap_count):
    return HTML('<linetoken>SOCKET:</linetoken> ')


def get_echodbg_prefix(lineno, wrap_count):
    return HTML('<linetoken>SYSECHO:</linetoken> ')


def main():
    global DEBUGAPP, NOECHO, APP, RECV_BUFFER_SIZE
    parser = argparse.ArgumentParser()
    parser.add_argument("host", help="The host of your MPD instance")
    parser.add_argument("-p", "--port",
                        help="The port on which MPD is running (default: 6600)",
                        type=int, default=6600, required=False)
    parser.add_argument("-s", "--secret",
                        help="Initialize connection with this password (default: None)",
                        type=str, required=False)
    parser.add_argument("-d", "--debug",
                        help="Show internal debug info (default: off)",
                        action="store_true", required=False)
    parser.add_argument("-a", "--alive-tick",
                        help="How many seconds to wait between keep-alive pings (default: 3)",
                        type=int, default=3, required=False)
    parser.add_argument("-n", "--no-echo",
                        help="Own commands don't get written into the output view (default: off)",
                        action="store_true", required=False)
    parser.add_argument("-b", "--buffer-size",
                        help="The size of one TCP buffer. A message might get broken "
                             "into multiple buffers if the size isn't big enough or "
                             "your network can't support it. For optimal performance "
                             "choose a power of two. (default: 4096)",
                        type=int, default=4096, required=False)
    args = parser.parse_args()

    DEBUGAPP = args.debug
    NOECHO = args.no_echo
    RECV_BUFFER_SIZE = args.buffer_size  # honor -b/--buffer-size for socket reads
    alive_tick = args.alive_tick
    port = args.port

    print(f"Connecting to {args.host}@{port}...")
    mpd = MPDClient(args.host, port)
    grammar = create_grammar()

    intro_text = HTML(
        f"Connected to: <c1>{mpd.server}@{mpd.port}</c1> ❯ <c2>{mpd.initmsg}</c2>")
    client_settings = HTML(
        f"Keep alive tick: <c4>{alive_tick}</c4> | "
        f"TCP buffer: <c4>{RECV_BUFFER_SIZE}</c4> | "
        f"Echo enabled: <c4>{str(not NOECHO)}</c4>")
    help_text = HTML(
        "Exit: <c4>[Control-C]</c4> | Scroll up: <c4>[PageUp]</c4> | "
        "Scroll down: <c4>[PageDown]</c4> | "
        "App command prefix: <c4>[!]</c4> <b>(try !help)</b>")

    lexer = GrammarLexer(
        grammar,
        lexers={
            "func": SimpleLexer("class:function"),
            "params": SimpleLexer("class:parameter"),
            "exec": SimpleLexer("class:exec"),
            "execparam": SimpleLexer("class:execparam"),
        },
    )

    commands = []
    commands.extend(mpdcmds)
    keywords = WordCompleter(commands)
    intern_keywords = WordCompleter(internalcmds.keys())
    completer = GrammarCompleter(
        grammar, {
            "func": keywords,
            "exec": intern_keywords
        },
    )

    search_field = SearchToolbar()  # For reverse search.

    output_field = Buffer()
    netdbg_buffer = Buffer()
    socketdbg_buffer = Buffer()
    echodbg_buffer = Buffer()

    input_field = TextArea(
        height=1,
        lexer=lexer,
        completer=completer,
        prompt="❯ ",
        style="class:input",
        multiline=False,
        wrap_lines=False,
        search_field=search_field,
    )

    lineup = Window(height=1, char="▁", style="class:line")
    linedown = Window(height=1, char="▔", style="class:line")
    debugnotice = Window(
        FormattedTextControl(HTML("<b>== Debug Info ==</b>")),
        height=1,
        style="class:title",
    )
    nettickwnd = Window(
        BufferControl(buffer=netdbg_buffer),
        height=1,
        get_line_prefix=get_netdbg_prefix,
        wrap_lines=False,
        style="class:debug")
    socketwnd = Window(
        BufferControl(buffer=socketdbg_buffer),
        height=1,
        get_line_prefix=get_socketdbg_prefix,
        wrap_lines=False,
        style="class:debug")
    echownd = Window(
        BufferControl(buffer=echodbg_buffer),
        height=1,
        get_line_prefix=get_echodbg_prefix,
        wrap_lines=False,
        style="class:debug")

    debugzone = HSplit([])
    if args.debug:
        debugzone = HSplit([
            lineup,
            debugnotice,
            lineup,
            nettickwnd,
            socketwnd,
            echownd,
            linedown])

    container = FloatContainer(
        content=HSplit(
            [
                Window(FormattedTextControl(intro_text),
                       height=1, style="class:title"),
                Window(FormattedTextControl(client_settings),
                       height=1, style="class:title"),
                linedown,
                debugzone,
                Window(BufferControl(buffer=output_field),
                       get_line_prefix=get_line_prefix,
                       wrap_lines=False,
                       style="class:output"),
                lineup,
                input_field,
                search_field,
                linedown,
                lineup,
                Window(FormattedTextControl(help_text),
                       height=1, style="class:toolbar"),
            ]
        ),
        floats=[
            Float(
                xcursor=True,
                ycursor=True,
                content=CompletionsMenu(max_height=32, scroll_offset=1),
            )
        ],
        style="class:base"
    )
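
    # prompt_toolkit idiom used by the helpers below (a sketch of the update
    # path, not extra behavior): a read-only pane is refreshed by replacing
    # its Buffer's Document wholesale, e.g.
    #
    #   netdbg_buffer.document = Document(text="NETTICK ...", cursor_position=0)
    #
    # netpoll() further down does the same for the output pane, but parks the
    # cursor at the end so the view stays scrolled to the newest line.
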
    def netdebug_print(msg):
        if not DEBUGAPP:
            return
        netdbg_buffer.document = Document(text=msg, cursor_position=0)

    def sockdebug_print(msg):
        if not DEBUGAPP:
            return
        socketdbg_buffer.document = Document(text=msg, cursor_position=0)

    def echodbg_print(msg):
        if not DEBUGAPP:
            return
        echodbg_buffer.document = Document(text=msg, cursor_position=0)

    def indent(text: str, spaces=2):
        output = ''
        for l in text.splitlines():
            output += ' ' * spaces + l + '\n'
        return output

    def accept(buff):
        # 'application' is assigned further down in main(); it exists by the
        # time the user can submit input.
        if mpd.force_closed():
            application.exit(result="Connection reset by peer")
        try:
            match = grammar.match(buff.text)
            if match:
                params = match.variables()
                execcmd = params.get("exec")
                if execcmd is not None:
                    params = params.get("execparam")
                    funcptr = internalcmds.get(
                        execcmd[1:],
                        lambda s, x: invalid_input("Unknown internal command"))
                    funcptr(mpd, params)
                else:
                    cmd = params.get("func")
                    if cmd not in mpdcmds:
                        mpd.local_echo(invalid_input())
                    else:
                        if not NOECHO:
                            mpd.local_echo(buff.text)
                        mpd.send(buff.text)
                        if buff.text == "close":
                            application.exit()
            else:
                mpd.local_echo(invalid_input())
        except BaseException as e:
            tb = sys.exc_info()[2]
            mpd.local_echo(
                "\n\nError: {}\n\tFrame: {}\n\tInstruction: {}\n\tLine: {}".format(
                    e, tb.tb_frame, tb.tb_lasti, tb.tb_lineno))

    input_field.accept_handler = accept
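
    # How submissions flow through accept(), by example (inputs illustrative):
    #
    #   "status"      -> echoed locally (unless --no-echo), then queued for MPD
    #   "close"       -> queued, then the UI exits
    #   "!scripts"    -> internalcmds dispatch, never leaves the machine
    #   "frobnicate"  -> rejected via invalid_input()
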
output_field = Buffer() netdbg_buffer = Buffer() socketdbg_buffer =", "<reponame>SirJson/mpdshell #!/usr/bin/env python3 import argparse import asyncio from re import DEBUG import selectors", "datetime.now().isoformat()}] netbuffer (DRAW): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer (DRAW): {mpd.peek_echobuffer()}\") new_text +=", "\"deleteid\", \"delpartition\", \"disableoutput\", \"enableoutput\", \"find\", \"findadd\", \"getfingerprint\", \"idle\", \"kill\", \"list\", \"listall\", \"listallinfo\", \"listfiles\",", "+= ' - ' + file.name + \"\\n\" output += f'\\n\\n----\\n=> Total: {len(files)}'", "spaces=2): output = '' for l in text.splitlines(): output += ' ' *", "\"\"\" ) def mpchelp(mpd, _param): output = '' output += \"=== MPC Commands", "choose a size with the power of two. (default: 4096)\", type=int, default=4096, required=False)", "False class MPDClient(object): def __init__(self, hostname: str, port: int): self.selector = selectors.DefaultSelector() self._inbuffer", "_param): output = f'=== Available mpd shell scripts in \"{SCRIPT_HOME}\" ===' files =", "mpd.echo_available(): echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer (DRAIN): {mpd.peek_echobuffer()}\") echomsg = mpd.pop_echo() if echomsg: isonow", "mpd = MPDClient(args.host, port) grammar = create_grammar() intro_text = HTML(f\"Connected to: <c1>{mpd.server}@{mpd.port}</c1> ❯", "text.splitlines(): output += ' ' * spaces + l + '\\n' return output", "input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer (DRAW): {mpd.peek_echobuffer()}\") new_text += recv_output if local_output", "def create_grammar(): return compile( r\"\"\" (?P<exec>\\![a-z]+) | ((?P<exec>\\![a-z]+)\\s(?P<execparam>[a-zA-Z0-9.\\/\\\\\\-\\_\\s]+)\\s*) | (?P<func>[a-z]+) | ((?P<func>[a-z]+)\\s(?P<params>\\+?[a-zA-Z0-9.\\/\\:\\\\\\-\\_\\s]+)\\s*) \"\"\"", "(?P<exec>\\![a-z]+) | ((?P<exec>\\![a-z]+)\\s(?P<execparam>[a-zA-Z0-9.\\/\\\\\\-\\_\\s]+)\\s*) | (?P<func>[a-z]+) | ((?P<func>[a-z]+)\\s(?P<params>\\+?[a-zA-Z0-9.\\/\\:\\\\\\-\\_\\s]+)\\s*) \"\"\" ) def mpchelp(mpd, _param): output", "3)\", type=int, default=3, required=False) parser.add_argument(\"-n\", \"--no-echo\", help=\"Own commands don't get written into the", "\"trailing-input\": base0F, \"output\": base0B, \"debug\": f\"bg:{base01} {base0A}\", \"input\": f\"bg:{base01} {base04}\", \"linetoken\": base0C, \"line\":", "import (Buffer, BufferControl, FormattedTextControl) from prompt_toolkit.layout.layout import Layout from prompt_toolkit.layout.menus import CompletionsMenu from", "required=False) parser.add_argument(\"-a\", \"--alive-tick\", help=\"How many seconds between a keep a live should be", "SCRIPT_HOME = Path.home() / 'mpdscripts' DEBUGAPP = False NOECHO = False APP =", "\"currentsong\", \"decoders\", \"delete\", \"deleteid\", \"delpartition\", \"disableoutput\", \"enableoutput\", \"find\", \"findadd\", \"getfingerprint\", \"idle\", \"kill\", \"list\",", "with self._io_lock: return len(self._inbuffer) > 0 def echo_available(self) -> bool: with self._io_lock: return", "lineup, debugnotice, lineup, nettickwnd, socketwnd, echownd, linedown]) container = FloatContainer( content=HSplit( [ Window(", "{}\".format(ex)) finally: self.socket.shutdown(socket.SHUT_WR) self.socket.close() def ping(self) -> bool: self.send('ping') def ping_unchecked(self): try: self.send('ping')", "msg = self._outbuffer.pop() command = str(msg + '\\n') 
                connection.sendall(bytes(command, 'utf-8'))

    def local_echo(self, message):
        with self._io_lock:
            self._echobuffer.append(message)
READBACK ECHOs
        ####################################
        sockdebug_print(
            f"[{datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}")
    kb = KeyBindings()

    @kb.add("pageup")
    def onpageup(_event):
        output_field.cursor_position -= 500

    @kb.add("pagedown")
    def onpagedown(_event):
        output_field.cursor_position += 500
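    # Further bindings follow the same pattern; e.g. a hypothetical Ctrl-L
    # binding to clear the output pane:
    #
    #     @kb.add("c-l")
    #     def clear_output(_event):
    #         output_field.document = Document(text="", cursor_position=0)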
\"next\", \"notcommands\", \"outputs\", \"outputset\",", "APP.reset() mpd.local_echo(\"Terminal reset!\") def listscripts(mpd, _param): output = f'=== Available mpd shell scripts", "is running (default: 6600)\", type=int, default=6600, required=False) parser.add_argument(\"-s\", \"--secret\", help=\"Initialize connection with this", "= RepeatedTimer(3.0, lambda x: x.ping_unchecked(), mpd) autopoll = RepeatedTimer(1.0, netpoll) # Run application.", "key_bindings=kb, style=gen_style(), mouse_support=True, full_screen=True, enable_page_navigation_bindings=False, color_depth=ColorDepth.TRUE_COLOR ) APP = application if args.secret is", "DEBUGAPP: return echodbg_buffer.document = Document( text=msg, cursor_position=0 ) def indent(text: str, spaces=2): output", "bool: self.send('ping') def ping_unchecked(self): try: self.send('ping') except BaseException: with self.state_lock: self._remote_closed = True", "self.state_lock = Lock() self._io_lock = Lock() self.socket.setblocking(False) self.selector.register( self.socket, selectors.EVENT_READ | selectors.EVENT_WRITE, self.onsocketready)", "= Window(height=1, char=\"▔\", style=\"class:line\") debugnotice = Window( FormattedTextControl( HTML(\"<b>== Debug Info ==</b>\") ),", "= False self.dbg_lastmask = 0x0 def data_available(self) -> bool: with self._io_lock: return len(self._inbuffer)", ") commands = [] commands.extend(mpdcmds) keywords = WordCompleter(commands) intern_keywords = WordCompleter(internalcmds.keys()) completer =", "') def get_netdbg_prefix(lineno, wrap_count): return HTML('<linetoken>NETTICK: </linetoken> ') def get_socketdbg_prefix(lineno, wrap_count): return HTML('<linetoken>SOCKET:</linetoken>", "if message: isonow = datetime.now().isoformat(timespec='seconds') recv_output += indent(f'\\n[{isonow}] {message}\\n') netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer", "str, port: int): self.selector = selectors.DefaultSelector() self._inbuffer = [] self._outbuffer = [] self._echobuffer", "4096 SCRIPT_HOME = Path.home() / 'mpdscripts' DEBUGAPP = False NOECHO = False APP", "For optimal performance choose a size with the power of two. 
f\"bg:{baseA0} {base05}\", \"toolbar\": f\"bg:{base01} {baseA1}\", \"title\": f\"bg:{base02} #90A4AE\", \"c1\": \"#FF5722\", \"c2\":", "\"urlhandlers\", \"volume\"] internalcmds = { \"exec\": lambda s, x: s.runscript(x), \"scripts\": lambda s,", "netpoll) # Run application. application = Application( layout=Layout(container, focused_element=input_field), key_bindings=kb, style=gen_style(), mouse_support=True, full_screen=True,", "= '' while mpd.echo_available(): echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer (DRAIN): {mpd.peek_echobuffer()}\") echomsg = mpd.pop_echo()", "def echodbg_print(msg): if not DEBUGAPP: return echodbg_buffer.document = Document( text=msg, cursor_position=0 ) def", "((?P<func>[a-z]+)\\s(?P<params>\\+?[a-zA-Z0-9.\\/\\:\\\\\\-\\_\\s]+)\\s*) \"\"\" ) def mpchelp(mpd, _param): output = '' output += \"=== MPC", "s, x: resetterm(s,x) } class RepeatedTimer(object): def __init__(self, interval, function, *args, **kwargs): self._timer", "== 'OK': return None else: return msg def pop_echo(self) -> str: with self._io_lock:", "by peer\") try: match = grammar.match(buff.text) if match: params = match.variables() execcmd =", "from prompt_toolkit import print_formatted_text as print from prompt_toolkit.application import Application, application from prompt_toolkit.completion", "if execcmd is not None: params = params.get(\"execparam\") funcptr = internalcmds.get( execcmd[1:], lambda", "import argparse import asyncio from re import DEBUG import selectors import socket import", "') file = params[0] with open(SCRIPT_HOME / file) as mpcscript: data = mpcscript.read()", "prompt_toolkit.document import Document from prompt_toolkit.key_binding import KeyBindings from prompt_toolkit.layout.containers import (Float, FloatContainer, HSplit,", "application.exit(result=\"Connection reset by peer\") try: match = grammar.match(buff.text) if match: params = match.variables()", "+= local_output if recv_output != '' or local_output != '': output_field.document = Document(", "\"#9575CD\", \"c4\": \"#4CAF50\", \"c5\": \"#9C27B0\" }) def invalid_input(msg=\"Invalid command\"): return msg def get_line_prefix(lineno,", "a size with the power of two. (default: 4096)\", type=int, default=4096, required=False) args", "f\"[{ datetime.now().isoformat()}] netbuffer: input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer: {mpd.peek_echobuffer()}\") #################################### # SECTION:", "= SearchToolbar() # For reverse search. 
    args = parser.parse_args()
    DEBUGAPP = args.debug
    NOECHO = args.no_echo  # assumption: the --no-echo flag is mirrored into the global
    alive_tick = args.alive_tick
    port = args.port
self._inbuffer.pop()
            # MPD replies end with a bare 'OK'; that acknowledgement carries
            # no payload, so report it as None.
            if str(msg).strip() == 'OK':
                return None
            return msg

    def pop_echo(self) -> str:
        with self._io_lock:
            return self._echobuffer.pop()
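    # Consumer sketch (illustrative; mirrors the UI's poll loop elsewhere in
    # this file). 'handle' is a hypothetical callback:
    #
    #     while client.data_available():
    #         msg = client.pop_message()
    #         if msg:
    #             handle(msg)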
(default: 3)",
                        type=int, default=3, required=False)
    # store_true instead of type=bool: argparse applies bool() to the raw
    # string, so any supplied value, even "False", would be truthy.
    parser.add_argument("-n", "--no-echo",
                        help="Own commands don't get written into the output view "
                             "(default: off)",
                        action="store_true", required=False)
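    # Illustrative invocation (hypothetical host name and values):
    #
    #     python3 mpdshell.py music.local -p 6600 -b 8192 -a 5 -n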
Application
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.contrib.regular_languages.compiler import compile
self.next_call - time.time(), self._run)
            self._timer.start()
            self.is_running = True
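    # Usage sketch (illustrative, not part of the original script). The first
    # call fires one full interval after construction, because __init__ seeds
    # next_call with the current time and start() adds the interval:
    #
    #     beat = RepeatedTimer(1.0, lambda: print("tick"))
    #     time.sleep(3.5)   # ~3 ticks
    #     beat.stop()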
"selectors.SelectSelector() loop = asyncio.SelectorEventLoop(selector) asyncio.set_event_loop(loop) RECV_BUFFER_SIZE = 4096 SCRIPT_HOME = Path.home() / 'mpdscripts'", "= HTML(f\"Keep alive tick: <c4>{alive_tick}</c4> | TCP buffer: <c4>{RECV_BUFFER_SIZE}</c4> | Echo enabled: <c4>{str(not", "= params.get(\"execparam\") funcptr = internalcmds.get( execcmd[1:], lambda s, x: invalid_input(\"Unknown internal command\")) funcptr(mpd,", "of #### def netpoll(): if not mpd: return sockdebug_print( f\"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}\")", "RepeatedTimer(object): def __init__(self, interval, function, *args, **kwargs): self._timer = None self.interval = interval", "x: listscripts(s, x), \"help\": lambda s, x: apphelp(s, x), \"mpchelp\": lambda s, x:", "search_field=search_field, ) lineup = Window(height=1, char=\"▁\", style=\"class:line\") linedown = Window(height=1, char=\"▔\", style=\"class:line\") debugnotice", "= None mpdcmds = [ \"add\", \"addid\", \"addtagid\", \"albumart\", \"channels\", \"clear\", \"clearerror\", \"cleartagid\",", "+= indent(f'\\n[{isonow}] {message}\\n') netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer (DRAIN): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") #################################### # SECTION:", "base01 = '#202020' base02 = '#303030' base03 = '#505050' base04 = '#909090' base05", "into multiple buffer if the size isn't big enough or your network can't", "\"close\": application.exit() else: mpd.local_echo(invalid_input()) except BaseException as e: tb = sys.exc_info()[2] mpd.local_echo(\"\\n\\nError: {}\\n\\tFrame:", "recv_output += indent(f'\\n[{isonow}] {message}\\n') netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer (DRAIN): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") #################################### #", "wrap_count): return HTML('<linetoken>SOCKET:</linetoken> ') def get_echodbg_prefix(lineno, wrap_count): return HTML('<linetoken>SYSECHO:</linetoken> ') def main(): global", "Document( text=msg, cursor_position=0 ) def echodbg_print(msg): if not DEBUGAPP: return echodbg_buffer.document = Document(", "-> int: return len(self._outbuffer) def peek_echobuffer(self) -> int: return len(self._echobuffer) def pop_message(self) ->", "\"searchadd\", \"searchaddpl\", \"seek\", \"seekcur\", \"seekid\", \"sendmessage\", \"setvol\", \"shuffle\", \"single\", \"stats\", \"status\", \"sticker\", \"stop\",", "= Lock() self.socket.setblocking(False) self.selector.register( self.socket, selectors.EVENT_READ | selectors.EVENT_WRITE, self.onsocketready) self._remote_closed = False self.dbg_lastmask", ") def sockdebug_print(msg): if not DEBUGAPP: return socketdbg_buffer.document = Document( text=msg, cursor_position=0 )", "self._io_lock: return len(self._echobuffer) > 0 def peek_inbuffer(self) -> int: return len(self._inbuffer) def peek_outbuffer(self)", "while mpd.echo_available(): echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer (DRAIN): {mpd.peek_echobuffer()}\") echomsg = mpd.pop_echo() if echomsg:", "base0F = '#7a2d00' baseA0 = '#242424' baseA1 = '#06A191' return Style.from_dict( { \"function\":", "height=1, style=\"class:title\", ), Window( FormattedTextControl( client_settings ), height=1, style=\"class:title\", ), linedown, debugzone, Window(", "port = args.port print(f\"Connecting to {args.host}@{port}...\") mpd = MPDClient(args.host, port) grammar = create_grammar()", "HTML(f\"Connected to: <c1>{mpd.server}@{mpd.port}</c1> ❯ <c2>{mpd.initmsg}</c2>\") client_settings = 
HTML(
        f"Keep alive tick: <c4>{alive_tick}</c4> | "
        f"TCP buffer: <c4>{RECV_BUFFER_SIZE}</c4> | "
        f"Echo enabled: <c4>{str(not NOECHO)}</c4>")
    application = Application(
        layout=Layout(container, focused_element=input_field),
        key_bindings=kb,
        style=gen_style(),
        mouse_support=True,
        full_screen=True,
        enable_page_navigation_bindings=False,
        color_depth=ColorDepth.TRUE_COLOR,
    )
str(data, 'utf-8') self.socket_lock = Lock() self.state_lock = Lock() self._io_lock =", "else: mpd.local_echo(invalid_input()) except BaseException as e: tb = sys.exc_info()[2] mpd.local_echo(\"\\n\\nError: {}\\n\\tFrame: {}\\n\\tInstruction: {}\\n\\tLine:", "= '#f29333' base0A = '#f8ca12' base0B = '#FF6236' base0C = '#00aabb' base0D =", "apphelp(mpd, _param): output = '' output += \"=== Shell Commands ===\\n\" for c", "<c4>{alive_tick}</c4> | TCP buffer: <c4>{RECV_BUFFER_SIZE}</c4> | Echo enabled: <c4>{str(not NOECHO)}</c4>\") help_text = HTML(f\"Exit:", "a keep a live should be waited. (default: 3)\", type=int, default=3, required=False) parser.add_argument(\"-n\",", "\"consume\", \"count\", \"crossfade\", \"currentsong\", \"decoders\", \"delete\", \"deleteid\", \"delpartition\", \"disableoutput\", \"enableoutput\", \"find\", \"findadd\", \"getfingerprint\",", "while len(self._outbuffer) > 0: msg = self._outbuffer.pop() command = str(msg + '\\n') connection.sendall(bytes(command,", "self._io_lock: return self._echobuffer.pop() def runscript(self, param): params = param.split(' ') file = params[0]", "help=\"How many seconds between a keep a live should be waited. (default: 3)\",", "netdbg_buffer.document = Document( text=msg, cursor_position=0 ) def sockdebug_print(msg): if not DEBUGAPP: return socketdbg_buffer.document", "size with the power of two. (default: 4096)\", type=int, default=4096, required=False) args =", "= list(SCRIPT_HOME.glob(\"*.ncs\")) for file in files: output += ' - ' + file.name", "@kb.add(\"c-c\") @kb.add(\"c-q\") def _(event): \"\"\"Pressing Ctrl-Q or Ctrl-C will exit the user interface.\"\"\"", "'' output += \"=== Shell Commands ===\\n\" for c in internalcmds.keys(): output +=", "'': netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer (DRAW): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer (DRAW):", "l + '\\n' return output def accept(buff): if mpd.force_closed(): application.exit(result=\"Connection reset by peer\")", "Ctrl-C will exit the user interface.\"\"\" event.app.exit() #### # Here happens the main", "self._remote_closed = True def force_closed(self): with self.state_lock: return self._remote_closed def poll(self): events =", "ECHOs #################################### sockdebug_print( f\"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}\") local_output = '' while mpd.echo_available(): echodbg_print(", "params.get(\"exec\") if execcmd is not None: params = params.get(\"execparam\") funcptr = internalcmds.get( execcmd[1:],", "{}\\n\\tFrame: {}\\n\\tInstruction: {}\\n\\tLine: {}\".format( e, tb.tb_frame, tb.tb_lasti, tb.tb_lineno)) input_field.accept_handler = accept # The", "execcmd[1:], lambda s, x: invalid_input(\"Unknown internal command\")) funcptr(mpd, params) else: cmd = params.get(\"func\")", "connection.recv(RECV_BUFFER_SIZE) if data: chunks.append(data) self._inbuffer.append(str(b''.join(chunks), 'utf-8')) def _transmit(self, connection): with self._io_lock: while len(self._outbuffer)", "f\"[{ datetime.now().isoformat()}] echobuffer (DRAIN): {mpd.peek_echobuffer()}\") #################################### # SECTION WRITE TO TTY #################################### sockdebug_print(", "= mask if mask & selectors.EVENT_READ: self._receive(connection) if mask & selectors.EVENT_WRITE: self._transmit(connection) def", "HSplit([ lineup, debugnotice, lineup, nettickwnd, socketwnd, echownd, linedown]) container = FloatContainer( content=HSplit( [", "= args self.kwargs 
= kwargs self.is_running = False self.next_call = time.time() self.start() def", "\"{SCRIPT_HOME}\" ===' files = list(SCRIPT_HOME.glob(\"*.ncs\")) for file in files: output += ' -", "GrammarLexer( grammar, lexers={ \"func\": SimpleLexer(\"class:function\"), \"params\": SimpleLexer(\"class:parameter\"), \"exec\": SimpleLexer(\"class:exec\"), \"execparam\": SimpleLexer(\"class:execparam\"), }, )", "= 0x0 def data_available(self) -> bool: with self._io_lock: return len(self._inbuffer) > 0 def", "{len(files)}' mpd.local_echo(output) def gen_style() -> Style: base00 = '#000000' base01 = '#202020' base02", "params.get(\"func\") if cmd not in mpdcmds: mpd.local_echo(invalid_input()) else: mpd.local_echo(buff.text) mpd.send(buff.text) if buff.text ==", "mpd.local_echo(invalid_input()) else: mpd.local_echo(buff.text) mpd.send(buff.text) if buff.text == \"close\": application.exit() else: mpd.local_echo(invalid_input()) except BaseException", "autoping = RepeatedTimer(3.0, lambda x: x.ping_unchecked(), mpd) autopoll = RepeatedTimer(1.0, netpoll) # Run", "Document from prompt_toolkit.key_binding import KeyBindings from prompt_toolkit.layout.containers import (Float, FloatContainer, HSplit, Window) from", "return self._echobuffer.pop() def runscript(self, param): params = param.split(' ') file = params[0] with", "base0B = '#FF6236' base0C = '#00aabb' base0D = '#0e5a94' base0E = '#b31e8d' base0F", "= f'=== Available mpd shell scripts in \"{SCRIPT_HOME}\" ===' files = list(SCRIPT_HOME.glob(\"*.ncs\")) for", "output({mpd.peek_outbuffer()})\") echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer (DRAW): {mpd.peek_echobuffer()}\") new_text += recv_output if local_output !=", "application. application = Application( layout=Layout(container, focused_element=input_field), key_bindings=kb, style=gen_style(), mouse_support=True, full_screen=True, enable_page_navigation_bindings=False, color_depth=ColorDepth.TRUE_COLOR )", "!help)</b>\") lexer = GrammarLexer( grammar, lexers={ \"func\": SimpleLexer(\"class:function\"), \"params\": SimpleLexer(\"class:parameter\"), \"exec\": SimpleLexer(\"class:exec\"), \"execparam\":", "#90A4AE\", \"c1\": \"#FF5722\", \"c2\": \"#D4E157\", \"c3\": \"#9575CD\", \"c4\": \"#4CAF50\", \"c5\": \"#9C27B0\" }) def", "chunks = [] with self._io_lock: data = connection.recv(RECV_BUFFER_SIZE) if data: chunks.append(data) self._inbuffer.append(str(b''.join(chunks), 'utf-8'))", "= mpd.pop_echo() if echomsg: isonow = datetime.now().isoformat(timespec='seconds') local_output += f'\\n{echomsg}\\n' echodbg_print( f\"[{ datetime.now().isoformat()}]", "[] self.server = hostname self.port = port self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket.connect((hostname, port))", "height=1, style=\"class:toolbar\", ), ] ), floats=[ Float( xcursor=True, ycursor=True, content=CompletionsMenu(max_height=32, scroll_offset=1), ) ],", "selectors from datetime import datetime from pathlib import Path from threading import Lock", "{mpd.peek_echobuffer()}\") #################################### # SECTION WRITE TO TTY #################################### sockdebug_print( f\"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}\")", "= datetime.now().isoformat(timespec='seconds') recv_output += indent(f'\\n[{isonow}] {message}\\n') netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer (DRAIN): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\")", "!= '': netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer (DRAW): input({mpd.peek_inbuffer()}) 
output({mpd.peek_outbuffer()})\") echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer", "mask: {mpd.dbg_lastmask}\") new_text = output_field.text if recv_output != '': netdebug_print( f\"[{ datetime.now().isoformat()}] netbuffer", "\"channels\", \"clear\", \"clearerror\", \"cleartagid\", \"close\", \"commands\", \"config\", \"consume\", \"count\", \"crossfade\", \"currentsong\", \"decoders\", \"delete\",", "& selectors.EVENT_WRITE: self._transmit(connection) def _receive(self, connection): chunks = [] with self._io_lock: data =", "\\ GrammarCompleter from prompt_toolkit.contrib.regular_languages.lexer import GrammarLexer from prompt_toolkit.document import Document from prompt_toolkit.key_binding import", "def _transmit(self, connection): with self._io_lock: while len(self._outbuffer) > 0: msg = self._outbuffer.pop() command", "required=False) parser.add_argument(\"-s\", \"--secret\", help=\"Initialize connection with this password (default: None)\", type=str, required=False) parser.add_argument(\"-d\",", "self.args = args self.kwargs = kwargs self.is_running = False self.next_call = time.time() self.start()", "self.socket_lock: self.socket.shutdown(socket.SHUT_WR) self.socket.close() def create_grammar(): return compile( r\"\"\" (?P<exec>\\![a-z]+) | ((?P<exec>\\![a-z]+)\\s(?P<execparam>[a-zA-Z0-9.\\/\\\\\\-\\_\\s]+)\\s*) | (?P<func>[a-z]+)", "the output view (default: 0)\", type=bool, default=False, required=False) parser.add_argument(\"-b\", \"--buffer-size\", help=\"The size of", "sockdebug_print( f\"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}\") recv_output = '' while mpd.data_available(): message = mpd.pop_message()", "Ctrl-Q or Ctrl-C will exit the user interface.\"\"\" event.app.exit() #### # Here happens", "MPC Commands ===\\n\" for c in mpdcmds: output += str(c) + \"\\n\" mpd.local_echo(output)", "base07 = '#ffffff' base08 = '#eb008a' base09 = '#f29333' base0A = '#f8ca12' base0B", "connection with this password (default: None)\", type=str, required=False) parser.add_argument(\"-d\", \"--debug\", help=\"Show internal debug", "\"mpchelp\": lambda s, x: mpchelp(s, x), \"reset\": lambda s, x: resetterm(s,x) } class", "False APP = None mpdcmds = [ \"add\", \"addid\", \"addtagid\", \"albumart\", \"channels\", \"clear\",", "(DRAIN): {mpd.peek_echobuffer()}\") #################################### # SECTION WRITE TO TTY #################################### sockdebug_print( f\"[{ datetime.now().isoformat()}] mask:", "\"\\n\" mpd.local_echo(output) def apphelp(mpd, _param): output = '' output += \"=== Shell Commands", "str(c) + \"\\n\" mpd.local_echo(output) def resetterm(mpd, _param): APP.reset() mpd.local_echo(\"Terminal reset!\") def listscripts(mpd, _param):", "= '#FF6236' base0C = '#00aabb' base0D = '#0e5a94' base0E = '#b31e8d' base0F =", "echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer (DRAW): {mpd.peek_echobuffer()}\") new_text += local_output if recv_output != ''", "not None: params = params.get(\"execparam\") funcptr = internalcmds.get( execcmd[1:], lambda s, x: invalid_input(\"Unknown", "debugzone, Window( BufferControl(buffer=output_field), get_line_prefix=get_line_prefix, wrap_lines=False, style=\"class:output\"), lineup, input_field, search_field, linedown, lineup, Window( FormattedTextControl(", "+ '\\n') connection.sendall(bytes(command, 'utf-8')) def local_echo(self, message): with self._io_lock: self._echobuffer.append(message) def close(self): with", "not DEBUGAPP: return echodbg_buffer.document = Document( text=msg, cursor_position=0 ) 
def indent(text: str, spaces=2):", "MPDClient(object): def __init__(self, hostname: str, port: int): self.selector = selectors.DefaultSelector() self._inbuffer = []", "len(self._echobuffer) def pop_message(self) -> str: with self._io_lock: msg = self._inbuffer.pop() if str(msg).strip() ==", "Style.from_dict( { \"function\": base0D, \"parameter\": base08, \"exec\": base0E, \"execparam\": base09, \"trailing-input\": base0F, \"output\":", "hostname self.port = port self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket.connect((hostname, port)) data = self.socket.recv(RECV_BUFFER_SIZE)", "closed by remote: {}\".format(ex)) finally: self.socket.shutdown(socket.SHUT_WR) self.socket.close() def ping(self) -> bool: self.send('ping') def", "TCP buffer: <c4>{RECV_BUFFER_SIZE}</c4> | Echo enabled: <c4>{str(not NOECHO)}</c4>\") help_text = HTML(f\"Exit: <c4>[Control-C]</c4> |", "= threading.Timer( self.next_call - time.time(), self._run) self._timer.start() self.is_running = True def stop(self): self._timer.cancel()", "None)\", type=str, required=False) parser.add_argument(\"-d\", \"--debug\", help=\"Show internal debug info (default: 0)\", type=bool, default=False,", "!= '' or local_output != '': output_field.document = Document( text=new_text, cursor_position=len(new_text)) application.invalidate() ####################################", "\"swapid\", \"tagtypes\", \"toggleoutput\", \"unmount\", \"unsubscribe\", \"update\", \"urlhandlers\", \"volume\"] internalcmds = { \"exec\": lambda", "get_line_prefix=get_socketdbg_prefix, wrap_lines=False, style=\"class:debug\") echownd = Window( BufferControl(buffer=echodbg_buffer), height=1, get_line_prefix=get_echodbg_prefix, wrap_lines=False, style=\"class:debug\") debugzone =", "\"commands\", \"config\", \"consume\", \"count\", \"crossfade\", \"currentsong\", \"decoders\", \"delete\", \"deleteid\", \"delpartition\", \"disableoutput\", \"enableoutput\", \"find\",", "resetterm(s,x) } class RepeatedTimer(object): def __init__(self, interval, function, *args, **kwargs): self._timer = None", "application if args.secret is not None: mpd.send(f\"password {args.secret}\") application.run() autoping.stop() autopoll.stop() mpd.disconnect() if", "compile from prompt_toolkit.contrib.regular_languages.completion import \\ GrammarCompleter from prompt_toolkit.contrib.regular_languages.lexer import GrammarLexer from prompt_toolkit.document import", "GrammarLexer from prompt_toolkit.document import Document from prompt_toolkit.key_binding import KeyBindings from prompt_toolkit.layout.containers import (Float,", "sys import threading import time import selectors from datetime import datetime from pathlib", "\"replay_gain_status\", \"rescan\", \"rm\", \"save\", \"search\", \"searchadd\", \"searchaddpl\", \"seek\", \"seekcur\", \"seekid\", \"sendmessage\", \"setvol\", \"shuffle\",", "text=new_text, cursor_position=len(new_text)) application.invalidate() #################################### # netpoll() end #################################### autoping = RepeatedTimer(3.0, lambda x:", "\"func\": SimpleLexer(\"class:function\"), \"params\": SimpleLexer(\"class:parameter\"), \"exec\": SimpleLexer(\"class:exec\"), \"execparam\": SimpleLexer(\"class:execparam\"), }, ) commands = []", "def stop(self): self._timer.cancel() self.is_running = False class MPDClient(object): def __init__(self, hostname: str, port:", "local_echo(self, message): with self._io_lock: self._echobuffer.append(message) def close(self): with self.socket_lock: 
self.socket.shutdown(socket.SHUT_WR) self.socket.close() def create_grammar():", "= True def stop(self): self._timer.cancel() self.is_running = False class MPDClient(object): def __init__(self, hostname:", "(DRAW): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})\") echodbg_print( f\"[{ datetime.now().isoformat()}] echobuffer (DRAW): {mpd.peek_echobuffer()}\") new_text += local_output if", "BaseException as ex: print(\"Connection closed by remote: {}\".format(ex)) finally: self.socket.shutdown(socket.SHUT_WR) self.socket.close() def ping(self)", "= False APP = None mpdcmds = [ \"add\", \"addid\", \"addtagid\", \"albumart\", \"channels\",", "try: self.send('ping') except BaseException: with self.state_lock: self._remote_closed = True def force_closed(self): with self.state_lock:", "waited. (default: 3)\", type=int, default=3, required=False) parser.add_argument(\"-n\", \"--no-echo\", help=\"Own commands don't get written", "\"--secret\", help=\"Initialize connection with this password (default: None)\", type=str, required=False) parser.add_argument(\"-d\", \"--debug\", help=\"Show", "'#FF6236' base0C = '#00aabb' base0D = '#0e5a94' base0E = '#b31e8d' base0F = '#7a2d00'", "\"title\": f\"bg:{base02} #90A4AE\", \"c1\": \"#FF5722\", \"c2\": \"#D4E157\", \"c3\": \"#9575CD\", \"c4\": \"#4CAF50\", \"c5\": \"#9C27B0\"", "self._remote_closed def poll(self): events = self.selector.select() for key, mask in events: callback =", "params = params.get(\"execparam\") funcptr = internalcmds.get( execcmd[1:], lambda s, x: invalid_input(\"Unknown internal command\"))" ]
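# A minimal, self-contained sketch of the standard-library selectors pattern
# MPDClient uses: one selector watches a single socket, select() collects
# ready events, and the registered callback drains reads and would flush
# queued writes. The host/port and the echo handling are placeholders, not
# part of the client above.
import selectors
import socket

sel = selectors.DefaultSelector()


def on_ready(conn, mask):
    if mask & selectors.EVENT_READ:
        data = conn.recv(4096)           # may be b'' if the peer closed
        if data:
            print("received:", data.decode("utf-8", "replace"))
    if mask & selectors.EVENT_WRITE:
        pass                             # flush an outbound queue here


sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", 6600))        # placeholder MPD host/port
sock.setblocking(False)
sel.register(sock, selectors.EVENT_READ | selectors.EVENT_WRITE, on_ready)

# One poll iteration; MPDClient.poll() runs the same dispatch on a timer.
for key, mask in sel.select(timeout=1.0):
    key.data(key.fileobj, mask)          # key.data is the registered callback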
# Utility helpers for the item catalog app: CSRF state, input validation,
# and XML serialization.
import random
import string
import other_info
import re
from dicttoxml import dicttoxml


def make_csrf_state(size):
    ''' Makes a CSRF state by randomly choosing uppercase letters and digits '''
    # xrange is Python 2; use range() on Python 3
    return ''.join(random.choice(string.ascii_uppercase + string.digits)
                   for _ in xrange(size))


def valid_item_name(item_name):
    ''' Test item name for bad words or format etc
        - not fully implemented for this project '''
    if len(item_name) > 50:
        return False
    return True


def valid_item_description(item_description):
    ''' Test item description for bad words or format etc
        - not fully implemented for this project '''
    if len(item_description) > 1000:
        return False
    return True


def allowed_file(filename):
    ''' Checks if an image file has the right extension '''
    ALLOWED_EXTENSIONS = set(['jpg', 'jpeg', 'gif', 'png'])
    return '.' in filename and \
        filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS


def valid_link(link):
    ''' Checks if the link provided is a valid link format
        - not fully implemented for this project '''
    if not link:
        return False
    # The original compiled a JS-style "/.../" pattern and discarded the
    # match result, so it accepted everything; test the match instead.
    pat = re.compile(r"^(https?:\/\/)?([\da-z\.-]+)\.([a-z\.]{2,6})([\/\w \.-]*)*\/?$")
    return pat.match(link) is not None


def test_new_item(item_dict):
    ''' Takes an item dictionary and checks if all fields are properly filled
        returns: tuple (Bool: Success, string: Error)
    '''
    # for now just test the surface things
    category = item_dict.get("category")
    if category not in other_info.item_categories:
        return (False, "Category is invalid")
    name = item_dict.get("name")
    if not name or not valid_item_name(name):
        return (False, "Name not valid")
    description = item_dict.get("description")
    if not valid_item_description(description):
        return (False, "Description not valid")
    link = item_dict.get("link")
    if link and not valid_link(item_dict["link"]):
        return (False, "Link is not valid")
    return (True, None)


def test_item_prop(item_dict):
    ''' Tests all the properties passed in the item_dict and checks
        if they are valid for updating the item '''
    my_valid_vars = ['name', 'category', 'description', 'link']
    for kw in item_dict:
        if kw not in my_valid_vars:
            return (False,
                    "You are trying to update a property that doesn't exist: %s" % kw)
        if kw == 'name' and not valid_item_name(item_dict[kw]):
            return (False, "Name not valid")
        if kw == 'description' and not valid_item_description(item_dict[kw]):
            return (False, "Description not valid")
        if kw == 'category' and item_dict[kw] not in other_info.item_categories:
            return (False, "Category not valid")
        if kw == 'link' and not valid_link(item_dict[kw]):
            return (False, "Link not valid")
    return (True, None)


def remove_special_characters(my_string):
    return ''.join(e for e in my_string if e.isalnum()).lower()


def get_cat_regex():
    return "(?i)" + '|'.join(other_info.item_categories)


def makexml(my_dict):
    return dicttoxml(my_dict)
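# A quick usage sketch for the validators above (not part of the module).
# It assumes the helpers are in scope and that "Soccer" is one of the
# entries in other_info.item_categories; both are placeholders here.
item = {
    "name": "Soccer ball",
    "category": "Soccer",                  # hypothetical category value
    "description": "A standard size 5 ball.",
    "link": "http://example.com/ball",
}

ok, err = test_new_item(item)
if not ok:
    print("rejected: %s" % err)

# Partial updates go through test_item_prop, which only checks the keys
# that were actually supplied.
ok, err = test_item_prop({"name": "New name"})
print(ok, err)                             # (True, None) when every field validates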
# Non-parametric ODE/SDE models: a GP drift (NPODE/NPSDE) plus a
# GP-interpolated Brownian-motion diffusion.
import numpy as np
import tensorflow as tf

from kernels import OperatorKernel
from gpflow import transforms
from param import Param
from integrators import ODERK4, SDEEM   # integrator module name inferred

tfd = tf.contrib.distributions
float_type = tf.float64
jitter0 = 1e-6


class NPODE:
    def __init__(self, Z0, U0, sn0, kern, jitter=jitter0,
                 summ=False, whiten=True, fix_Z=False, fix_U=False,
                 fix_sn=False):
        """ Constructor for the NPODE model
        Args:
            Z0: Numpy matrix of initial inducing points of size MxD, M being
                the number of inducing points.
            U0: Numpy matrix of initial inducing vectors of size MxD, M being
                the number of inducing points.
            sn0: Numpy vector of size 1xD for initial signal variance
            kern: Kernel object for GP interpolation
            jitter: Float of jitter level
            whiten: Boolean. Currently we perform the optimization only in
                the white domain
            summ: Boolean for Tensorflow summary
            fix_Z: Boolean - whether inducing locations are fixed or optimized
            fix_U: Boolean - whether inducing vectors are fixed or optimized
            fix_sn: Boolean - whether noise variance is fixed or optimized
        """
        self.name = 'npode'
        self.whiten = whiten
        self.kern = kern
        self.jitter = jitter
        with tf.name_scope("NPDE"):
            Z = Param(Z0, name="Z", summ=False, fixed=fix_Z)
            U = Param(U0, name="U", summ=False, fixed=fix_U)
            sn = Param(np.array(sn0), name="sn", summ=summ, fixed=fix_sn,
                       transform=transforms.Log1pe())
        self.Z = Z()
        self.U = U()
        self.sn = sn()
        self.D = U.shape[1]
        self.integrator = ODERK4(self)
        self.fix_Z = fix_Z
        self.fix_sn = fix_sn
        self.fix_U = fix_U

    def f(self, X, t=[0]):
        """ Implements GP interpolation to compute the value of the
        differential function at location(s) X.
        Args:
            X: TxD tensor of locations. T is the number of locations.
        Returns:
            TxD tensor of differential function (GP conditional) computed on
            input locations
        """
        U = self.U
        Z = self.Z
        kern = self.kern
        N = tf.shape(X)[0]
        M = tf.shape(Z)[0]
        D = tf.shape(Z)[1]  # dim of state
        if kern.ktype == "id":
            Kzz = kern.K(Z) + tf.eye(M, dtype=float_type) * self.jitter
        else:
            Kzz = kern.K(Z) + tf.eye(M*D, dtype=float_type) * self.jitter
        Lz = tf.cholesky(Kzz)
        Kzx = kern.K(Z, X)
        A = tf.matrix_triangular_solve(Lz, Kzx, lower=True)
        if not self.whiten:
            A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False)
        f = tf.matmul(A, U, transpose_a=True)
        # transformation for "id - rbf" kernel
        if not kern.ktype == "id" and not kern.ktype == "kr":
            f = tf.reshape(f, [N, D])
        return f

    def Kzz(self):
        kern = self.kern
        Z = self.Z
        M = tf.shape(Z)[0]
        D = tf.shape(Z)[1]  # dim of state
        if kern.ktype == "id":
            Kzz = kern.K(Z) + tf.eye(M, dtype=float_type) * self.jitter
        else:
            Kzz = kern.K(Z) + tf.eye(M*D, dtype=float_type) * self.jitter
        return Kzz

    def unwhitened_U(self):
        # (named U() in the original, where it is shadowed by the U
        # attribute set in __init__; renamed to stay reachable)
        U = self.U
        if self.whiten:
            Lz = tf.cholesky(self.Kzz())
            U = tf.matmul(Lz, U)
        return U

    def build_prior(self):
        if self.kern.ktype == "id" or self.kern.ktype == "kr":
            if self.whiten:
                mvn = tfd.MultivariateNormalDiag(
                    loc=tf.zeros_like(self.U[:, 0]))
            else:
                mvn = tfd.MultivariateNormalFullCovariance(
                    loc=tf.zeros_like(self.U[:, 0]),
                    covariance_matrix=self.kern.K(self.Z, self.Z))
            probs = tf.add_n([mvn.log_prob(self.U[:, d])
                              for d in range(self.D)])
        else:
            if self.whiten:
                mvn = tfd.MultivariateNormalDiag(loc=tf.zeros_like(self.U))
            else:
                mvn = tfd.MultivariateNormalFullCovariance(
                    loc=tf.zeros_like(self.U),
                    covariance_matrix=self.kern.K(self.Z, self.Z))
            probs = tf.reduce_sum(mvn.log_prob(self.U))
        return probs

    def forward(self, x0, ts):
        return self.integrator.forward(x0=x0, ts=ts)

    def sample(self, x0, t):
        """ Draws a sample path from a learned ODE system
        Args:
            x0: Python/numpy array of initial value
            t: Python/numpy array of time points the integral is evaluated at
        Returns:
            ODE solution computed at t, tensor of size [len(t),len(x0)]
        """
        x0 = np.asarray(x0, dtype=np.float64).reshape((1, -1))
        t = [t]
        integrator = ODERK4(self)
        path = integrator.forward(x0, t)
        return path[0]

    def __str__(self):
        rep = 'noise variance: ' + str(self.sn.eval()) + \
              '\nsignal variance: ' + str(self.kern.sf.eval()) + \
              '\nlengthscales: ' + str(self.kern.ell.eval())
        return rep


class NPSDE(NPODE):
    def __init__(self, Z0, U0, sn0, kern, diffus, s=1, jitter=jitter0,
                 summ=False, whiten=True, fix_Z=False, fix_U=False,
                 fix_sn=False):
        """ Constructor for the NPSDE model
        Args:
            Z0: Numpy matrix of initial inducing points of size MxD, M being
                the number of inducing points.
            U0: Numpy matrix of initial inducing vectors of size MxD, M being
                the number of inducing points.
            sn0: Numpy vector of size 1xD for initial signal variance
            kern: Kernel object for GP interpolation
            diffus: BrownianMotion object for diffusion GP interpolation
            s: Integer parameterizing how denser the integration points are
            jitter: Float of jitter level
            whiten: Boolean. Currently we perform the optimization only in
                the white domain
            summ: Boolean for Tensorflow summary
            fix_Z: Boolean - whether inducing locations are fixed or optimized
            fix_U: Boolean - whether inducing vectors are fixed or optimized
            fix_sn: Boolean - whether noise variance is fixed or optimized
        """
        super().__init__(Z0, U0, sn0, kern, jitter=jitter,
                         summ=summ, whiten=whiten,
                         fix_Z=fix_Z, fix_U=fix_U, fix_sn=fix_sn)
        self.name = 'npsde'
        self.diffus = diffus
        self.s = s
        self.integrator = SDEEM(self)

    def build_prior(self):
        pf = super().build_prior()
        pg = self.diffus.build_prior()
        return pf + pg

    def g(self, ts, Nw=1):
        return self.diffus.g(ts=ts, Nw=Nw)

    def forward(self, x0, ts, Nw=1):
        return self.integrator.forward(x0=x0, ts=ts, Nw=Nw)

    def sample(self, x0, t, Nw):
        """ Draws random samples from a learned SDE system
        Args:
            x0: Python/numpy array of initial value
            t: Python/numpy array of time points
            Nw: number of random samples
        Returns:
            SDE samples of shape (Nw, len(t), D)
        """
        # returns (Nw, len(t), D)
        x0 = np.asarray(x0, dtype=np.float64).reshape((1, -1))
        t = [t]
        path = self.integrator.forward(x0, t, Nw)
        path = path[0]
        return path

    def __str__(self):
        return super().__str__() + self.diffus.__str__()


class BrownianMotion:
    def __init__(self, sf0, ell0, Z0, U0, whiten=True, summ=False,
                 fix_Z=False, fix_U=False, fix_ell=True, fix_sf=True,
                 jitter=jitter0):
        """ Constructor for the state-dependent Brownian motion (diffusion)
        Args:
            whiten: Boolean. Currently we perform the optimization only in
                the white domain
            fix_Z: Boolean - whether inducing locations are fixed or optimized
            fix_U: Boolean - whether inducing vectors are fixed or optimized
        """
        with tf.name_scope('Brownian'):
            Zg = Param(Z0, name="Z", summ=False, fixed=fix_Z)
            Ug = Param(U0, name="U", summ=False, fixed=fix_U)
        self.kern = OperatorKernel(sf0=sf0, ell0=ell0, ktype="id",
                                   name='Kernel', summ=summ,
                                   fix_ell=fix_ell, fix_sf=fix_sf)
        self.Zg = Zg()
        self.Ug = Ug()
        self.jitter = jitter
        self.whiten = whiten
        self.fix_Z = fix_Z
        self.fix_U = fix_U

    def g(self, X, t):
        """ Generates state dependent brownian motion
        Args:
            X: current states (in rows)
            t: current time (used if diffusion depends on time)
        Returns:
            The diffusion increment g(X) * dW for the current states
        """
        Ug = self.Ug
        Zg = self.Zg
        kern = self.kern
        if not kern.ktype == "id":
            raise NotImplementedError()
        M = tf.shape(Zg)[0]
        D = tf.shape(X)[1]
        if kern.ktype == "id":
            Kzz = kern.K(Zg) + tf.eye(M, dtype=float_type) * self.jitter
        else:
            Kzz = kern.K(Zg) + tf.eye(M*D, dtype=float_type) * self.jitter
        Lz = tf.cholesky(Kzz)
        Kzx = kern.K(Zg, X)
        A = tf.matrix_triangular_solve(Lz, Kzx, lower=True)
        if not self.whiten:
            A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False)
        g = tf.matmul(A, Ug, transpose_a=True)
        dw = tf.random_normal(tf.shape(X), dtype=float_type)
        return g*dw

    def build_prior(self):
        if self.whiten:
            mvn = tfd.MultivariateNormalDiag(loc=tf.zeros_like(self.Ug))
        else:
            mvn = tfd.MultivariateNormalFullCovariance(
                loc=tf.zeros_like(self.Ug),
                covariance_matrix=self.kern.K(self.Zg, self.Zg))
        return tf.reduce_sum(mvn.log_prob(self.Ug))

    def __str__(self):
        rep = '\ndiff signal variance: ' + str(self.kern.sf.eval()) + \
              '\ndiff lengthscales: ' + str(self.kern.ell.eval())
        return rep
sn0: Numpy vector", "fixed = fix_sn, transform = transforms.Log1pe()) self.Z = Z() self.U = U() self.sn", "Kzz(self): kern = self.kern Z = self.Z M = tf.shape(Z)[0] D = tf.shape(Z)[1]", "U(self): U = self.U if self.whiten: Lz = tf.cholesky(self.Kzz()) U = tf.matmul(Lz,U) return", "integration points are jitter: Float of jitter level summ: Boolean for Tensorflow summary", "(Nw, len(t), D) x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1)) t = [t] path = self.integrator.forward(x0,t,Nw) path", "Kzx = kern.K(Zg, X) A = tf.matrix_triangular_solve(Lz, Kzx, lower=True) if not self.whiten: A", "= fix_Z) Ug = Param(U0, name = \"U\", summ = False, fixed =", "BrownianMotion object for diffusion GP interpolation s: Integer parameterizing how denser the integration", "- whether noise variance is fixed or optimized \"\"\" self.name = 'npode' self.whiten", "Args: Z0: Numpy matrix of initial inducing points of size MxD, M being", "self.U if self.whiten: Lz = tf.cholesky(self.Kzz()) U = tf.matmul(Lz,U) return U def __str__(self):", "inducing points of size MxD, M being the number of inducing points. U0:", "the integral and returns the path Args: x0: Python/numpy array of initial value", "self.U = U() self.sn = sn() self.D = U.shape[1] self.integrator = ODERK4(self) self.fix_Z", "fix_Z: Boolean - whether inducing locations are fixed or optimized fix_U: Boolean -", "\"\"\" U = self.U Z = self.Z kern = self.kern N = tf.shape(X)[0]", "fixed = fix_U) self.kern = OperatorKernel(sf0=sf0, ell0=ell0, ktype=\"id\", name='Kernel', summ=summ, fix_ell=fix_ell, fix_sf=fix_sf) self.Zg", "compute the value of the differential function at location(s) X. Args: X: TxD", "+ \\ '\\ndiff lengthscales: ' + str(self.kern.ell.eval()) return rep def build_prior(self): if self.whiten:", "variance: ' + str(self.kern.sf.eval()) + \\ '\\nlengthscales: ' + str(self.kern.ell.eval()) return rep class", "'\\ndiff signal variance: ' + str(self.kern.sf.eval()) + \\ '\\ndiff lengthscales: ' + str(self.kern.ell.eval())", "number of inducing points. U0: Numpy matrix of initial inducing vectors of size", "= np.asarray(x0,dtype=np.float64).reshape((1,-1)) t = [t] path = self.integrator.forward(x0,t,Nw) path = path[0] return path", "t = [t] integrator = ODERK4(self) path = integrator.forward(x0,t) path = path[0] return", "only in the white domain fix_Z: Boolean - whether inducing locations are fixed", "\"U\", summ = False, fixed = fix_U) self.kern = OperatorKernel(sf0=sf0, ell0=ell0, ktype=\"id\", name='Kernel',", "Boolean. 
Currently we perform the optimization only in the white domain summ: Boolean", "Numpy matrix of initial inducing points of size MxD, M being the number", "whether noise variance is fixed or optimized \"\"\" self.name = 'npode' self.whiten =", "= OperatorKernel(sf0=sf0, ell0=ell0, ktype=\"id\", name='Kernel', summ=summ, fix_ell=fix_ell, fix_sf=fix_sf) self.Zg = Zg() self.Ug =", "tf.eye(M, dtype=float_type) * self.jitter else: Kzz = kern.K(Zg) + tf.eye(M*D, dtype=float_type) * self.jitter", "\"\"\" generates state dependent brownian motion Args: X: current states (in rows) t:", "lower=True) if not self.whiten: A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False) g = tf.matmul(A, Ug,", "* self.jitter Lz = tf.cholesky(Kzz) Kzx = kern.K(Zg, X) A = tf.matrix_triangular_solve(Lz, Kzx,", "= integrator.forward(x0,t) path = path[0] return path def Kzz(self): kern = self.kern Z", "of size [Nw,len(t),len(x0)] storing samples \"\"\" # returns (Nw, len(t), D) x0 =", "not self.whiten: A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False) g = tf.matmul(A, Ug, transpose_a=True) dw", "X) A = tf.matrix_triangular_solve(Lz, Kzx, lower=True) if not self.whiten: A = tf.matrix_triangular_solve(tf.transpose(Lz), A,", "self.kern N = tf.shape(X)[0] M = tf.shape(Z)[0] D = tf.shape(Z)[1] # dim of", "whether inducing locations are fixed or optimized fix_U: Boolean - whether inducing vectors", "False, fixed = fix_U) self.kern = OperatorKernel(sf0=sf0, ell0=ell0, ktype=\"id\", name='Kernel', summ=summ, fix_ell=fix_ell, fix_sf=fix_sf)", "t: Python/numpy array of time points the integral is evaluated at Returns: ODE", "points the integral is evaluated at Returns: Tensor of size [Nw,len(t),len(x0)] storing samples", "variance kern: Kernel object for GP interpolation jitter: Float of jitter level whiten:", "Integer parameterizing how denser the integration points are jitter: Float of jitter level", "= U() self.sn = sn() self.D = U.shape[1] self.integrator = ODERK4(self) self.fix_Z =", "U.shape[1] self.integrator = ODERK4(self) self.fix_Z = fix_Z self.fix_sn = fix_sn self.fix_U = fix_U", "self.fix_sn = fix_sn self.fix_U = fix_U def f(self,X,t=[0]): \"\"\" Implements GP interpolation to", "object for GP interpolation diffus: BrownianMotion object for diffusion GP interpolation s: Integer", "t: Python/numpy array of time points the integral is evaluated at Returns: Tensor", "str(self.sn.eval()) + \\ '\\nsignal variance: ' + str(self.kern.sf.eval()) + \\ '\\nlengthscales: ' +", "def predict(self,x0,t): \"\"\" Computes the integral and returns the path Args: x0: Python/numpy", "Zg() self.Ug = Ug() self.jitter = 1e-6 self.whiten = whiten self.fix_Z = fix_Z", "tfd.MultivariateNormalDiag( loc=tf.zeros_like(self.U)) else: mvn = tfd.MultivariateNormalFullCovariance( loc=tf.zeros_like(self.U), covariance_matrix=self.kern.K(self.Z,self.Z)) probs = tf.reduce_sum(mvn.log_prob(tf.squeeze(self.U))) return probs", "inducing vectors are fixed or optimized fix_sn: Boolean - whether noise variance is", "kern.K(Zg, X) A = tf.matrix_triangular_solve(Lz, Kzx, lower=True) if not self.whiten: A = tf.matrix_triangular_solve(tf.transpose(Lz),", "= 'noise variance: ' + str(self.sn.eval()) + \\ '\\nsignal variance: ' + str(self.kern.sf.eval())", "- whether noise variance is fixed or optimized \"\"\" super().__init__(Z0,U0,sn0,kern,jitter=jitter, summ=summ,whiten=whiten,fix_Z=fix_Z,fix_U=fix_U,fix_sn=fix_sn) self.name =", "Kzz = kern.K(Z) + tf.eye(M, dtype=float_type) * self.jitter else: Kzz = kern.K(Z) +", "path def 
__str__(self): return super().__str__() + self.diffus.__str__() class BrownianMotion: def __init__(self,sf0,ell0,Z0,U0,whiten=False,summ=False, fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False): with", "fixed = fix_U) sn = Param(np.array(sn0), name = \"sn\", summ = summ, fixed", "at t, tensor of size [len(t),len(x0)] \"\"\" x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1)) t = [t]", "self.kern Z = self.Z M = tf.shape(Z)[0] D = tf.shape(Z)[1] # dim of", "Boolean - whether noise variance is fixed or optimized \"\"\" super().__init__(Z0,U0,sn0,kern,jitter=jitter, summ=summ,whiten=whiten,fix_Z=fix_Z,fix_U=fix_U,fix_sn=fix_sn) self.name", "'npsde' self.diffus = diffus self.integrator = SDEEM(self) def build_prior(self): pf = super().build_prior() pg", "fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False): with tf.name_scope('Brownian'): Zg = Param(Z0, name = \"Z\", summ = False, fixed", "= False, fixed = fix_Z) Ug = Param(U0, name = \"U\", summ =", "1e-6 self.whiten = whiten self.fix_Z = fix_Z self.fix_U = fix_U def g(self,X,t): \"\"\"", "summ = False, fixed = fix_Z) Ug = Param(U0, name = \"U\", summ", "of initial inducing points of size MxD, M being the number of inducing", "= \"Z\", summ = False, fixed = fix_Z) U = Param(U0, name =", "[len(t),len(x0)] \"\"\" x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1)) t = [t] integrator = ODERK4(self) path =", "for d in range(self.kern.ndims)]) else: if self.whiten: mvn = tfd.MultivariateNormalDiag( loc=tf.zeros_like(self.U)) else: mvn", "tf import tensorflow.contrib.distributions as tfd from integrators import ODERK4, SDEEM from kernels import", "integral is evaluated at Returns: ODE solution computed at t, tensor of size", "inducing points. U0: Numpy matrix of initial inducing vectors of size MxD, M", "False, fixed = fix_Z) U = Param(U0, name = \"U\", summ = False,", "' + str(self.kern.sf.eval()) + \\ '\\nlengthscales: ' + str(self.kern.ell.eval()) return rep class NPSDE(NPODE):", "NPODE model Args: Z0: Numpy matrix of initial inducing points of size MxD,", "+ tf.eye(M*D, dtype=float_type) * self.jitter return Kzz def U(self): U = self.U if", "return probs def forward(self,x0,ts): return self.integrator.forward(x0=x0,ts=ts) def predict(self,x0,t): \"\"\" Computes the integral and", "SDE system Args: Nw: Integer number of samples x0: Python/numpy array of initial", "Implements GP interpolation to compute the value of the differential function at location(s)", "= Param(U0, name = \"U\", summ = False, fixed = fix_U) sn =", "Tensor of size [Nw,len(t),len(x0)] storing samples \"\"\" # returns (Nw, len(t), D) x0", "being the number of inducing points. U0: Numpy matrix of initial inducing vectors", "self.U Z = self.Z kern = self.kern N = tf.shape(X)[0] M = tf.shape(Z)[0]", "import Param float_type = tf.float64 jitter0 = 1e-6 class NPODE: def __init__(self,Z0,U0,sn0,kern,jitter=jitter0, summ=False,whiten=True,fix_Z=False,fix_U=False,fix_sn=False):", "location(s) X. 
Args: X: TxD tensor of input locations, T is the number", "x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1)) t = [t] integrator = ODERK4(self) path = integrator.forward(x0,t) path", "self.whiten = whiten self.kern = kern self.jitter = jitter with tf.name_scope(\"NPDE\"): Z =", "with tf.name_scope('Brownian'): Zg = Param(Z0, name = \"Z\", summ = False, fixed =", "g(self,X,t): \"\"\" generates state dependent brownian motion Args: X: current states (in rows)", "dependent brownian motion Args: X: current states (in rows) t: current time (used", "1xD for initial signal variance kern: Kernel object for GP interpolation diffus: BrownianMotion", "A tensor of the same shape as X \"\"\" Ug = self.Ug Zg", "[t] integrator = ODERK4(self) path = integrator.forward(x0,t) path = path[0] return path def", "= tf.cholesky(Kzz) Kzx = kern.K(Z, X) A = tf.matrix_triangular_solve(Lz, Kzx, lower=True) if not", "= tf.shape(Z)[0] D = tf.shape(Z)[1] # dim of state if kern.ktype == \"id\":", "fix_Z) U = Param(U0, name = \"U\", summ = False, fixed = fix_U)", "of jitter level whiten: Boolean. Currently we perform the optimization only in the", "object for GP interpolation jitter: Float of jitter level whiten: Boolean. Currently we", "= kern.K(Z) + tf.eye(M*D, dtype=float_type) * self.jitter return Kzz def U(self): U =", "\"\"\" Ug = self.Ug Zg = self.Zg kern = self.kern if not kern.ktype", "= tfd.MultivariateNormalFullCovariance( loc=tf.zeros_like(self.U), covariance_matrix=self.kern.K(self.Z,self.Z)) probs = tf.reduce_sum(mvn.log_prob(tf.squeeze(self.U))) return probs def forward(self,x0,ts): return self.integrator.forward(x0=x0,ts=ts)", "self.jitter else: Kzz = kern.K(Z) + tf.eye(M*D, dtype=float_type) * self.jitter Lz = tf.cholesky(Kzz)", "points are jitter: Float of jitter level summ: Boolean for Tensorflow summary whiten:", "kern: Kernel object for GP interpolation jitter: Float of jitter level whiten: Boolean.", "= self.U Z = self.Z kern = self.kern N = tf.shape(X)[0] M =", "\"id\": raise NotImplementedError() M = tf.shape(Zg)[0] D = tf.shape(X)[1] if kern.ktype == \"id\":", "+ pg def g(self,ts,Nw=1): return self.diffus.g(ts=ts,Nw=Nw) def forward(self,x0,ts,Nw=1): return self.integrator.forward(x0=x0,ts=ts,Nw=Nw) def sample(self,x0,t,Nw): \"\"\"", "perform the optimization only in the white domain summ: Boolean for Tensorflow summary", "NPSDE model Args: Z0: Numpy matrix of initial inducing points of size MxD,", "input locations, T is the number of locations. 
Returns: TxD tensor of differential", "Args: x0: Python/numpy array of initial value t: Python/numpy array of time points", "super().__str__() + self.diffus.__str__() class BrownianMotion: def __init__(self,sf0,ell0,Z0,U0,whiten=False,summ=False, fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False): with tf.name_scope('Brownian'): Zg = Param(Z0,", "X \"\"\" Ug = self.Ug Zg = self.Zg kern = self.kern if not", "tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False) g = tf.matmul(A, Ug, transpose_a=True) dw = tf.random_normal(tf.shape(X),dtype=float_type) return g*dw", "Param float_type = tf.float64 jitter0 = 1e-6 class NPODE: def __init__(self,Z0,U0,sn0,kern,jitter=jitter0, summ=False,whiten=True,fix_Z=False,fix_U=False,fix_sn=False): \"\"\"", "probs def forward(self,x0,ts): return self.integrator.forward(x0=x0,ts=ts) def predict(self,x0,t): \"\"\" Computes the integral and returns", "+ self.diffus.__str__() class BrownianMotion: def __init__(self,sf0,ell0,Z0,U0,whiten=False,summ=False, fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False): with tf.name_scope('Brownian'): Zg = Param(Z0, name", "summ, fixed = fix_sn, transform = transforms.Log1pe()) self.Z = Z() self.U = U()", "self.whiten: mvn = tfd.MultivariateNormalDiag( loc=tf.zeros_like(self.Ug)) else: mvn = tfd.MultivariateNormalFullCovariance( loc=tf.zeros_like(self.Ug), covariance_matrix=self.kern.K(self.Zg,self.Zg)) return tf.reduce_sum(mvn.log_prob(self.Ug))", "self.jitter else: Kzz = kern.K(Zg) + tf.eye(M*D, dtype=float_type) * self.jitter Lz = tf.cholesky(Kzz)", "fixed or optimized \"\"\" self.name = 'npode' self.whiten = whiten self.kern = kern", "Draws random samples from a learned SDE system Args: Nw: Integer number of", "array of initial value t: Python/numpy array of time points the integral is", "variance: ' + str(self.kern.sf.eval()) + \\ '\\ndiff lengthscales: ' + str(self.kern.ell.eval()) return rep", "U = self.U Z = self.Z kern = self.kern N = tf.shape(X)[0] M", "+ str(self.kern.ell.eval()) return rep def build_prior(self): if self.whiten: mvn = tfd.MultivariateNormalDiag( loc=tf.zeros_like(self.Ug)) else:", "fix_Z self.fix_sn = fix_sn self.fix_U = fix_U def f(self,X,t=[0]): \"\"\" Implements GP interpolation", "summ=False,whiten=True,fix_Z=False,fix_U=False,fix_sn=False): \"\"\" Constructor for the NPSDE model Args: Z0: Numpy matrix of initial", "summ=summ,whiten=whiten,fix_Z=fix_Z,fix_U=fix_U,fix_sn=fix_sn) self.name = 'npsde' self.diffus = diffus self.integrator = SDEEM(self) def build_prior(self): pf", "Boolean for Tensorflow summary fix_Z: Boolean - whether inducing locations are fixed or", "dtype=float_type) * self.jitter Lz = tf.cholesky(Kzz) Kzx = kern.K(Z, X) A = tf.matrix_triangular_solve(Lz,", "whether inducing vectors are fixed or optimized fix_sn: Boolean - whether noise variance", "differential function (GP conditional) computed on input locations \"\"\" U = self.U Z", "= self.kern if not kern.ktype == \"id\": raise NotImplementedError() M = tf.shape(Zg)[0] D", "= False, fixed = fix_Z) U = Param(U0, name = \"U\", summ =", "diffus self.integrator = SDEEM(self) def build_prior(self): pf = super().build_prior() pg = self.diffus.build_prior() return", "locations are fixed or optimized fix_U: Boolean - whether inducing vectors are fixed", "__str__(self): return super().__str__() + self.diffus.__str__() class BrownianMotion: def __init__(self,sf0,ell0,Z0,U0,whiten=False,summ=False, fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False): with tf.name_scope('Brownian'): Zg", "jitter level whiten: Boolean. 
Currently we perform the optimization only in the white", "X. Args: X: TxD tensor of input locations, T is the number of", "self.whiten: Lz = tf.cholesky(self.Kzz()) U = tf.matmul(Lz,U) return U def __str__(self): rep =", "Zg = self.Zg kern = self.kern if not kern.ktype == \"id\": raise NotImplementedError()", "raise NotImplementedError() M = tf.shape(Zg)[0] D = tf.shape(X)[1] if kern.ktype == \"id\": Kzz", "U def __str__(self): rep = 'noise variance: ' + str(self.sn.eval()) + \\ '\\nsignal", "= tf.random_normal(tf.shape(X),dtype=float_type) return g*dw def __str__(self): rep = '\\ndiff signal variance: ' +", "Float of jitter level whiten: Boolean. Currently we perform the optimization only in", "kern self.jitter = jitter with tf.name_scope(\"NPDE\"): Z = Param(Z0, name = \"Z\", summ", "Python/numpy array of initial value t: Python/numpy array of time points the integral", "mvn = tfd.MultivariateNormalFullCovariance( loc=tf.zeros_like(self.U[:,0]), covariance_matrix=self.kern.K(self.Z,self.Z)) probs = tf.add_n([mvn.log_prob(self.U[:,d]) for d in range(self.kern.ndims)]) else:", "\"kr\" : f = tf.reshape(f,[N,D]) return f def build_prior(self): if self.kern.ktype == \"id\"", "f = tf.reshape(f,[N,D]) return f def build_prior(self): if self.kern.ktype == \"id\" or self.kern.ktype", "== \"kr\": if self.whiten: mvn = tfd.MultivariateNormalDiag( loc=tf.zeros_like(self.U[:,0])) else: mvn = tfd.MultivariateNormalFullCovariance( loc=tf.zeros_like(self.U[:,0]),", "__init__(self,Z0,U0,sn0,kern,diffus,s=1,jitter=jitter0, summ=False,whiten=True,fix_Z=False,fix_U=False,fix_sn=False): \"\"\" Constructor for the NPSDE model Args: Z0: Numpy matrix of", "D = tf.shape(Z)[1] # dim of state if kern.ktype == \"id\": Kzz =", "probs = tf.reduce_sum(mvn.log_prob(tf.squeeze(self.U))) return probs def forward(self,x0,ts): return self.integrator.forward(x0=x0,ts=ts) def predict(self,x0,t): \"\"\" Computes", "interpolation diffus: BrownianMotion object for diffusion GP interpolation s: Integer parameterizing how denser", "Currently we perform the optimization only in the white domain summ: Boolean for", "= ODERK4(self) self.fix_Z = fix_Z self.fix_sn = fix_sn self.fix_U = fix_U def f(self,X,t=[0]):", "size [Nw,len(t),len(x0)] storing samples \"\"\" # returns (Nw, len(t), D) x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1))", "time points the integral is evaluated at Returns: ODE solution computed at t,", "vectors of size MxD, M being the number of inducing points. sn0: Numpy", "= self.U if self.whiten: Lz = tf.cholesky(self.Kzz()) U = tf.matmul(Lz,U) return U def", "states (in rows) t: current time (used if diffusion depends on time) Returns:", "' + str(self.kern.sf.eval()) + \\ '\\ndiff lengthscales: ' + str(self.kern.ell.eval()) return rep def", "returns the path Args: x0: Python/numpy array of initial value t: Python/numpy array", "fix_Z self.fix_U = fix_U def g(self,X,t): \"\"\" generates state dependent brownian motion Args:", "(GP conditional) computed on input locations \"\"\" U = self.U Z = self.Z", "or self.kern.ktype == \"kr\": if self.whiten: mvn = tfd.MultivariateNormalDiag( loc=tf.zeros_like(self.U[:,0])) else: mvn =", "TxD tensor of input locations, T is the number of locations. 
Returns: TxD", "the same shape as X \"\"\" Ug = self.Ug Zg = self.Zg kern", "noise variance is fixed or optimized \"\"\" super().__init__(Z0,U0,sn0,kern,jitter=jitter, summ=summ,whiten=whiten,fix_Z=fix_Z,fix_U=fix_U,fix_sn=fix_sn) self.name = 'npsde' self.diffus", "Currently we perform the optimization only in the white domain fix_Z: Boolean -", "Param(np.array(sn0), name = \"sn\", summ = summ, fixed = fix_sn, transform = transforms.Log1pe())", "the NPSDE model Args: Z0: Numpy matrix of initial inducing points of size", "fix_U: Boolean - whether inducing vectors are fixed or optimized fix_sn: Boolean -", "kern.ktype == \"id\": Kzz = kern.K(Z) + tf.eye(M, dtype=float_type) * self.jitter else: Kzz", "self.integrator.forward(x0,t,Nw) path = path[0] return path def __str__(self): return super().__str__() + self.diffus.__str__() class", "def g(self,X,t): \"\"\" generates state dependent brownian motion Args: X: current states (in", "return Kzz def U(self): U = self.U if self.whiten: Lz = tf.cholesky(self.Kzz()) U", "loc=tf.zeros_like(self.U)) else: mvn = tfd.MultivariateNormalFullCovariance( loc=tf.zeros_like(self.U), covariance_matrix=self.kern.K(self.Z,self.Z)) probs = tf.reduce_sum(mvn.log_prob(tf.squeeze(self.U))) return probs def", "def g(self,ts,Nw=1): return self.diffus.g(ts=ts,Nw=Nw) def forward(self,x0,ts,Nw=1): return self.integrator.forward(x0=x0,ts=ts,Nw=Nw) def sample(self,x0,t,Nw): \"\"\" Draws random", "Kzz = kern.K(Zg) + tf.eye(M, dtype=float_type) * self.jitter else: Kzz = kern.K(Zg) +", "if not self.whiten: A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False) f = tf.matmul(A, U, transpose_a=True)", "number of locations. Returns: TxD tensor of differential function (GP conditional) computed on", "state if kern.ktype == \"id\": Kzz = kern.K(Z) + tf.eye(M, dtype=float_type) * self.jitter", "self.kern if not kern.ktype == \"id\": raise NotImplementedError() M = tf.shape(Zg)[0] D =", "\"U\", summ = False, fixed = fix_U) sn = Param(np.array(sn0), name = \"sn\",", "from gpflow import transforms from param import Param float_type = tf.float64 jitter0 =", "path = integrator.forward(x0,t) path = path[0] return path def Kzz(self): kern = self.kern", "Z = self.Z M = tf.shape(Z)[0] D = tf.shape(Z)[1] # dim of state", "the integral is evaluated at Returns: ODE solution computed at t, tensor of", "= tf.shape(X)[0] M = tf.shape(Z)[0] D = tf.shape(Z)[1] # dim of state if", "\\ '\\nsignal variance: ' + str(self.kern.sf.eval()) + \\ '\\nlengthscales: ' + str(self.kern.ell.eval()) return", "tf.shape(Z)[0] D = tf.shape(Z)[1] # dim of state if kern.ktype == \"id\": Kzz", "interpolation jitter: Float of jitter level whiten: Boolean. 
Currently we perform the optimization", "== \"id\" or self.kern.ktype == \"kr\": if self.whiten: mvn = tfd.MultivariateNormalDiag( loc=tf.zeros_like(self.U[:,0])) else:", "inducing locations are fixed or optimized fix_U: Boolean - whether inducing vectors are", "self.kern = OperatorKernel(sf0=sf0, ell0=ell0, ktype=\"id\", name='Kernel', summ=summ, fix_ell=fix_ell, fix_sf=fix_sf) self.Zg = Zg() self.Ug", "= Z() self.U = U() self.sn = sn() self.D = U.shape[1] self.integrator =", "loc=tf.zeros_like(self.U), covariance_matrix=self.kern.K(self.Z,self.Z)) probs = tf.reduce_sum(mvn.log_prob(tf.squeeze(self.U))) return probs def forward(self,x0,ts): return self.integrator.forward(x0=x0,ts=ts) def predict(self,x0,t):", "of size 1xD for initial signal variance kern: Kernel object for GP interpolation", "self.fix_U = fix_U def f(self,X,t=[0]): \"\"\" Implements GP interpolation to compute the value", "__init__(self,sf0,ell0,Z0,U0,whiten=False,summ=False, fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False): with tf.name_scope('Brownian'): Zg = Param(Z0, name = \"Z\", summ = False,", "fix_sn: Boolean - whether noise variance is fixed or optimized \"\"\" self.name =", "integral and returns the path Args: x0: Python/numpy array of initial value t:", "and not kern.ktype == \"kr\" : f = tf.reshape(f,[N,D]) return f def build_prior(self):", "initial inducing vectors of size MxD, M being the number of inducing points.", "return rep def build_prior(self): if self.whiten: mvn = tfd.MultivariateNormalDiag( loc=tf.zeros_like(self.Ug)) else: mvn =", "optimized fix_sn: Boolean - whether noise variance is fixed or optimized \"\"\" self.name", "tfd.MultivariateNormalFullCovariance( loc=tf.zeros_like(self.U), covariance_matrix=self.kern.K(self.Z,self.Z)) probs = tf.reduce_sum(mvn.log_prob(tf.squeeze(self.U))) return probs def forward(self,x0,ts): return self.integrator.forward(x0=x0,ts=ts) def", "ODERK4(self) self.fix_Z = fix_Z self.fix_sn = fix_sn self.fix_U = fix_U def f(self,X,t=[0]): \"\"\"", "g(self,ts,Nw=1): return self.diffus.g(ts=ts,Nw=Nw) def forward(self,x0,ts,Nw=1): return self.integrator.forward(x0=x0,ts=ts,Nw=Nw) def sample(self,x0,t,Nw): \"\"\" Draws random samples", "of state if kern.ktype == \"id\": Kzz = kern.K(Z) + tf.eye(M, dtype=float_type) *", "path def Kzz(self): kern = self.kern Z = self.Z M = tf.shape(Z)[0] D", "path Args: x0: Python/numpy array of initial value t: Python/numpy array of time", "integrators import ODERK4, SDEEM from kernels import OperatorKernel from gpflow import transforms from", "the value of the differential function at location(s) X. 
Args: X: TxD tensor", "from a learned SDE system Args: Nw: Integer number of samples x0: Python/numpy", "a learned SDE system Args: Nw: Integer number of samples x0: Python/numpy array", "\"Z\", summ = False, fixed = fix_Z) U = Param(U0, name = \"U\",", "= SDEEM(self) def build_prior(self): pf = super().build_prior() pg = self.diffus.build_prior() return pf +", "range(self.kern.ndims)]) else: if self.whiten: mvn = tfd.MultivariateNormalDiag( loc=tf.zeros_like(self.U)) else: mvn = tfd.MultivariateNormalFullCovariance( loc=tf.zeros_like(self.U),", "of differential function (GP conditional) computed on input locations \"\"\" U = self.U", "number of samples x0: Python/numpy array of initial value t: Python/numpy array of", "= fix_Z self.fix_U = fix_U def g(self,X,t): \"\"\" generates state dependent brownian motion", "\"id - rbf\" kernel if not kern.ktype == \"id\" and not kern.ktype ==", "self.jitter = 1e-6 self.whiten = whiten self.fix_Z = fix_Z self.fix_U = fix_U def", "value t: Python/numpy array of time points the integral is evaluated at Returns:", "= tf.shape(Zg)[0] D = tf.shape(X)[1] if kern.ktype == \"id\": Kzz = kern.K(Zg) +", "as tf import tensorflow.contrib.distributions as tfd from integrators import ODERK4, SDEEM from kernels", "self.whiten: A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False) f = tf.matmul(A, U, transpose_a=True) # transformation", "white domain fix_Z: Boolean - whether inducing locations are fixed or optimized fix_U:", "computed at t, tensor of size [len(t),len(x0)] \"\"\" x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1)) t =", "BrownianMotion: def __init__(self,sf0,ell0,Z0,U0,whiten=False,summ=False, fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False): with tf.name_scope('Brownian'): Zg = Param(Z0, name = \"Z\", summ", "f def build_prior(self): if self.kern.ktype == \"id\" or self.kern.ktype == \"kr\": if self.whiten:", "only in the white domain summ: Boolean for Tensorflow summary fix_Z: Boolean -", "and returns the path Args: x0: Python/numpy array of initial value t: Python/numpy", "self.Ug Zg = self.Zg kern = self.kern if not kern.ktype == \"id\": raise", "= whiten self.fix_Z = fix_Z self.fix_U = fix_U def g(self,X,t): \"\"\" generates state", "+ tf.eye(M*D, dtype=float_type) * self.jitter Lz = tf.cholesky(Kzz) Kzx = kern.K(Zg, X) A", "the integral is evaluated at Returns: Tensor of size [Nw,len(t),len(x0)] storing samples \"\"\"", "Python/numpy array of time points the integral is evaluated at Returns: ODE solution", "+ str(self.sn.eval()) + \\ '\\nsignal variance: ' + str(self.kern.sf.eval()) + \\ '\\nlengthscales: '", "dim of state if kern.ktype == \"id\": Kzz = kern.K(Z) + tf.eye(M, dtype=float_type)", "tfd.MultivariateNormalFullCovariance( loc=tf.zeros_like(self.U[:,0]), covariance_matrix=self.kern.K(self.Z,self.Z)) probs = tf.add_n([mvn.log_prob(self.U[:,d]) for d in range(self.kern.ndims)]) else: if self.whiten:", "of time points the integral is evaluated at Returns: Tensor of size [Nw,len(t),len(x0)]", "self.whiten = whiten self.fix_Z = fix_Z self.fix_U = fix_U def g(self,X,t): \"\"\" generates", "def forward(self,x0,ts): return self.integrator.forward(x0=x0,ts=ts) def predict(self,x0,t): \"\"\" Computes the integral and returns the", "SDEEM from kernels import OperatorKernel from gpflow import transforms from param import Param", "is fixed or optimized \"\"\" self.name = 'npode' self.whiten = whiten self.kern =", "A, lower=False) f = tf.matmul(A, U, transpose_a=True) # transformation for \"id - rbf\"", "path[0] return path def 
Kzz(self): kern = self.kern Z = self.Z M =", "diffusion depends on time) Returns: A tensor of the same shape as X", "transpose_a=True) dw = tf.random_normal(tf.shape(X),dtype=float_type) return g*dw def __str__(self): rep = '\\ndiff signal variance:", "from kernels import OperatorKernel from gpflow import transforms from param import Param float_type", "tf.shape(X)[0] M = tf.shape(Z)[0] D = tf.shape(Z)[1] # dim of state if kern.ktype", "tf.matmul(A, U, transpose_a=True) # transformation for \"id - rbf\" kernel if not kern.ktype", "number of inducing points. sn0: Numpy vector of size 1xD for initial signal", "pg def g(self,ts,Nw=1): return self.diffus.g(ts=ts,Nw=Nw) def forward(self,x0,ts,Nw=1): return self.integrator.forward(x0=x0,ts=ts,Nw=Nw) def sample(self,x0,t,Nw): \"\"\" Draws", "the number of inducing points. sn0: Numpy vector of size 1xD for initial", "= tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False) f = tf.matmul(A, U, transpose_a=True) # transformation for \"id", "else: mvn = tfd.MultivariateNormalFullCovariance( loc=tf.zeros_like(self.U[:,0]), covariance_matrix=self.kern.K(self.Z,self.Z)) probs = tf.add_n([mvn.log_prob(self.U[:,d]) for d in range(self.kern.ndims)])", "= tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False) g = tf.matmul(A, Ug, transpose_a=True) dw = tf.random_normal(tf.shape(X),dtype=float_type) return", "U = self.U if self.whiten: Lz = tf.cholesky(self.Kzz()) U = tf.matmul(Lz,U) return U", "tf.shape(X)[1] if kern.ktype == \"id\": Kzz = kern.K(Zg) + tf.eye(M, dtype=float_type) * self.jitter", "= kern.K(Z, X) A = tf.matrix_triangular_solve(Lz, Kzx, lower=True) if not self.whiten: A =", "Numpy vector of size 1xD for initial signal variance kern: Kernel object for", "kernel if not kern.ktype == \"id\" and not kern.ktype == \"kr\" : f", "= tf.reduce_sum(mvn.log_prob(tf.squeeze(self.U))) return probs def forward(self,x0,ts): return self.integrator.forward(x0=x0,ts=ts) def predict(self,x0,t): \"\"\" Computes the", "[Nw,len(t),len(x0)] storing samples \"\"\" # returns (Nw, len(t), D) x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1)) t", "self.fix_Z = fix_Z self.fix_U = fix_U def g(self,X,t): \"\"\" generates state dependent brownian", "state dependent brownian motion Args: X: current states (in rows) t: current time", "time points the integral is evaluated at Returns: Tensor of size [Nw,len(t),len(x0)] storing", "import tensorflow.contrib.distributions as tfd from integrators import ODERK4, SDEEM from kernels import OperatorKernel", "variance is fixed or optimized \"\"\" super().__init__(Z0,U0,sn0,kern,jitter=jitter, summ=summ,whiten=whiten,fix_Z=fix_Z,fix_U=fix_U,fix_sn=fix_sn) self.name = 'npsde' self.diffus =", "as np import tensorflow as tf import tensorflow.contrib.distributions as tfd from integrators import", "transforms from param import Param float_type = tf.float64 jitter0 = 1e-6 class NPODE:", "= fix_U) sn = Param(np.array(sn0), name = \"sn\", summ = summ, fixed =", "depends on time) Returns: A tensor of the same shape as X \"\"\"", "name = \"U\", summ = False, fixed = fix_U) sn = Param(np.array(sn0), name", "Ug() self.jitter = 1e-6 self.whiten = whiten self.fix_Z = fix_Z self.fix_U = fix_U", "interpolation s: Integer parameterizing how denser the integration points are jitter: Float of", "X: current states (in rows) t: current time (used if diffusion depends on", "' + str(self.kern.ell.eval()) return rep def build_prior(self): if self.whiten: mvn = tfd.MultivariateNormalDiag( loc=tf.zeros_like(self.Ug))", "= self.diffus.build_prior() 
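
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): how the NPODE
# class above might be driven end to end. The helper name _demo_npode, the
# toy shapes and values, and the session handling are illustrative
# assumptions; the OperatorKernel arguments mirror the call made inside
# BrownianMotion further below.
def _demo_npode():
    M, D = 5, 2                                  # inducing points, state dim
    Z0 = np.random.randn(M, D)                   # toy inducing locations
    U0 = np.random.randn(M, D)                   # toy inducing vectors
    kern = OperatorKernel(sf0=1.0, ell0=np.ones(D), ktype="id",
                          name='Kernel', summ=False,
                          fix_ell=False, fix_sf=False)
    npode = NPODE(Z0, U0, sn0=0.1*np.ones((1, D)), kern=kern)
    # predict() wraps the RK4 integrator and returns a [len(t), D] tensor
    path = npode.predict(x0=np.zeros(D), t=np.linspace(0.0, 5.0, 50))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        return sess.run(path)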

class NPSDE(NPODE):
    def __init__(self,Z0,U0,sn0,kern,diffus,s=1,jitter=jitter0,
                 summ=False,whiten=True,fix_Z=False,fix_U=False,fix_sn=False):
        """ Constructor for the NPSDE model
        Args:
            Z0: Numpy matrix of initial inducing points of size MxD, M being
                the number of inducing points.
            U0: Numpy matrix of initial inducing vectors of size MxD, M being
                the number of inducing points.
            sn0: Numpy vector of size 1xD for initial signal variance
            kern: Kernel object for GP interpolation
            diffus: BrownianMotion object for diffusion GP interpolation
            s: Integer parameterizing how much denser the integration points
                are
            jitter: Float of jitter level
            summ: Boolean for Tensorflow summary
            whiten: Boolean. Currently we perform the optimization only in
                the white domain
            fix_Z: Boolean - whether inducing locations are fixed or optimized
            fix_U: Boolean - whether inducing vectors are fixed or optimized
            fix_sn: Boolean - whether noise variance is fixed or optimized
        """
        super().__init__(Z0,U0,sn0,kern,jitter=jitter,
                         summ=summ,whiten=whiten,
                         fix_Z=fix_Z,fix_U=fix_U,fix_sn=fix_sn)
        self.name = 'npsde'
        self.diffus = diffus
        self.integrator = SDEEM(self)

    def build_prior(self):
        pf = super().build_prior()
        pg = self.diffus.build_prior()
        return pf + pg

    def g(self,ts,Nw=1):
        return self.diffus.g(ts=ts,Nw=Nw)

    def forward(self,x0,ts,Nw=1):
        return self.integrator.forward(x0=x0,ts=ts,Nw=Nw)

    def sample(self,x0,t,Nw):
        """ Draws random samples from a learned SDE system
        Args:
            Nw: Integer number of samples
            x0: Python/numpy array of initial value
            t: Python/numpy array of time points the integral is evaluated at
        Returns:
            Tensor of size [Nw,len(t),len(x0)] storing samples
        """
        # returns (Nw, len(t), D)
        x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1))
        t = [t]
        path = self.integrator.forward(x0,t,Nw)
        path = path[0]
        return path

    def __str__(self):
        return super().__str__() + self.diffus.__str__()
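
# ---------------------------------------------------------------------------
# Editor's usage sketch (again an illustrative assumption, not the authors'
# code): pairing NPSDE with the BrownianMotion diffusion defined below.
# sample() wraps the Euler-Maruyama integrator (SDEEM) and yields Nw sample
# paths. The helper name _demo_npsde and all toy values are hypothetical.
def _demo_npsde():
    M, D = 5, 2
    Z0 = np.random.randn(M, D)
    kern = OperatorKernel(sf0=1.0, ell0=np.ones(D), ktype="id",
                          name='Kernel', summ=False,
                          fix_ell=False, fix_sf=False)
    diffus = BrownianMotion(sf0=1.0, ell0=np.ones(D),
                            Z0=Z0, U0=0.1*np.random.randn(M, D))
    npsde = NPSDE(Z0, np.random.randn(M, D), sn0=0.1*np.ones((1, D)),
                  kern=kern, diffus=diffus)
    paths = npsde.sample(x0=np.zeros(D), t=np.linspace(0.0, 5.0, 50), Nw=10)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        return sess.run(paths)                   # shape (Nw, len(t), D)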

class BrownianMotion:
    def __init__(self,sf0,ell0,Z0,U0,whiten=False,summ=False,
                 fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False):
        with tf.name_scope('Brownian'):
            Zg = Param(Z0, name = "Z", summ = False, fixed = fix_Z)
            Ug = Param(U0, name = "U", summ = False, fixed = fix_U)
        self.kern = OperatorKernel(sf0=sf0, ell0=ell0, ktype="id",
                                   name='Kernel', summ=summ,
                                   fix_ell=fix_ell, fix_sf=fix_sf)
        self.Zg = Zg()
        self.Ug = Ug()
        self.jitter = 1e-6
        self.whiten = whiten
        self.fix_Z = fix_Z
        self.fix_U = fix_U

    def g(self,X,t):
        """ Generates state-dependent Brownian motion
        Args:
            X: current states (in rows)
            t: current time (used if diffusion depends on time)
        Returns:
            A tensor of the same shape as X
        """
        Ug = self.Ug
        Zg = self.Zg
        kern = self.kern
        if not kern.ktype == "id":
            raise NotImplementedError()
        M = tf.shape(Zg)[0]
        D = tf.shape(X)[1]
        if kern.ktype == "id":
            Kzz = kern.K(Zg) + tf.eye(M, dtype=float_type) * self.jitter
        else:
            Kzz = kern.K(Zg) + tf.eye(M*D, dtype=float_type) * self.jitter
        Lz = tf.cholesky(Kzz)
        Kzx = kern.K(Zg, X)
        A = tf.matrix_triangular_solve(Lz, Kzx, lower=True)
        if not self.whiten:
            A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False)
        g = tf.matmul(A, Ug, transpose_a=True)
        dw = tf.random_normal(tf.shape(X),dtype=float_type)
        return g*dw

    def build_prior(self):
        if self.whiten:
            mvn = tfd.MultivariateNormalDiag(
                loc=tf.zeros_like(self.Ug))
        else:
            mvn = tfd.MultivariateNormalFullCovariance(
                loc=tf.zeros_like(self.Ug),
                covariance_matrix=self.kern.K(self.Zg,self.Zg))
        return tf.reduce_sum(mvn.log_prob(self.Ug))

    def __str__(self):
        rep = '\ndiff signal variance: ' + str(self.kern.sf.eval()) + \
              '\ndiff lengthscales:    ' + str(self.kern.ell.eval())
        return rep
"signal variance kern: Kernel object for GP interpolation diffus: BrownianMotion object for diffusion", ": f = tf.reshape(f,[N,D]) return f def build_prior(self): if self.kern.ktype == \"id\" or", "self.Z M = tf.shape(Z)[0] D = tf.shape(Z)[1] # dim of state if kern.ktype", "evaluated at Returns: Tensor of size [Nw,len(t),len(x0)] storing samples \"\"\" # returns (Nw,", "tensor of the same shape as X \"\"\" Ug = self.Ug Zg =", "as tfd from integrators import ODERK4, SDEEM from kernels import OperatorKernel from gpflow", "self.integrator.forward(x0=x0,ts=ts) def predict(self,x0,t): \"\"\" Computes the integral and returns the path Args: x0:", "+ tf.eye(M, dtype=float_type) * self.jitter else: Kzz = kern.K(Zg) + tf.eye(M*D, dtype=float_type) *", "Args: X: TxD tensor of input locations, T is the number of locations.", "= fix_sn self.fix_U = fix_U def f(self,X,t=[0]): \"\"\" Implements GP interpolation to compute", "pf = super().build_prior() pg = self.diffus.build_prior() return pf + pg def g(self,ts,Nw=1): return", "'\\ndiff lengthscales: ' + str(self.kern.ell.eval()) return rep def build_prior(self): if self.whiten: mvn =", "import numpy as np import tensorflow as tf import tensorflow.contrib.distributions as tfd from", "if not self.whiten: A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False) g = tf.matmul(A, Ug, transpose_a=True)", "OperatorKernel from gpflow import transforms from param import Param float_type = tf.float64 jitter0", "dw = tf.random_normal(tf.shape(X),dtype=float_type) return g*dw def __str__(self): rep = '\\ndiff signal variance: '", "def __str__(self): rep = 'noise variance: ' + str(self.sn.eval()) + \\ '\\nsignal variance:", "= Zg() self.Ug = Ug() self.jitter = 1e-6 self.whiten = whiten self.fix_Z =", "Returns: A tensor of the same shape as X \"\"\" Ug = self.Ug", "+ str(self.kern.sf.eval()) + \\ '\\ndiff lengthscales: ' + str(self.kern.ell.eval()) return rep def build_prior(self):", "or optimized \"\"\" self.name = 'npode' self.whiten = whiten self.kern = kern self.jitter", "\"id\" or self.kern.ktype == \"kr\": if self.whiten: mvn = tfd.MultivariateNormalDiag( loc=tf.zeros_like(self.U[:,0])) else: mvn", "at Returns: ODE solution computed at t, tensor of size [len(t),len(x0)] \"\"\" x0", "class NPSDE(NPODE): def __init__(self,Z0,U0,sn0,kern,diffus,s=1,jitter=jitter0, summ=False,whiten=True,fix_Z=False,fix_U=False,fix_sn=False): \"\"\" Constructor for the NPSDE model Args: Z0:", "\"\"\" # returns (Nw, len(t), D) x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1)) t = [t] path", "as X \"\"\" Ug = self.Ug Zg = self.Zg kern = self.kern if", "GP interpolation diffus: BrownianMotion object for diffusion GP interpolation s: Integer parameterizing how", "fix_ell=fix_ell, fix_sf=fix_sf) self.Zg = Zg() self.Ug = Ug() self.jitter = 1e-6 self.whiten =", "= self.Z kern = self.kern N = tf.shape(X)[0] M = tf.shape(Z)[0] D =", "Constructor for the NPSDE model Args: Z0: Numpy matrix of initial inducing points", "else: mvn = tfd.MultivariateNormalFullCovariance( loc=tf.zeros_like(self.U), covariance_matrix=self.kern.K(self.Z,self.Z)) probs = tf.reduce_sum(mvn.log_prob(tf.squeeze(self.U))) return probs def forward(self,x0,ts):", "kern.ktype == \"id\": raise NotImplementedError() M = tf.shape(Zg)[0] D = tf.shape(X)[1] if kern.ktype", "N = tf.shape(X)[0] M = tf.shape(Z)[0] D = tf.shape(Z)[1] # dim of state", "\"\"\" super().__init__(Z0,U0,sn0,kern,jitter=jitter, summ=summ,whiten=whiten,fix_Z=fix_Z,fix_U=fix_U,fix_sn=fix_sn) self.name = 'npsde' self.diffus = diffus 
self.integrator = SDEEM(self) def", "kern.ktype == \"id\": Kzz = kern.K(Zg) + tf.eye(M, dtype=float_type) * self.jitter else: Kzz", "the integration points are jitter: Float of jitter level summ: Boolean for Tensorflow", "whiten: Boolean. Currently we perform the optimization only in the white domain summ:", "from integrators import ODERK4, SDEEM from kernels import OperatorKernel from gpflow import transforms", "self.integrator = ODERK4(self) self.fix_Z = fix_Z self.fix_sn = fix_sn self.fix_U = fix_U def", "g = tf.matmul(A, Ug, transpose_a=True) dw = tf.random_normal(tf.shape(X),dtype=float_type) return g*dw def __str__(self): rep", "matrix of initial inducing points of size MxD, M being the number of", "sn = Param(np.array(sn0), name = \"sn\", summ = summ, fixed = fix_sn, transform", "Kernel object for GP interpolation jitter: Float of jitter level whiten: Boolean. Currently", "the differential function at location(s) X. Args: X: TxD tensor of input locations,", "self.name = 'npsde' self.diffus = diffus self.integrator = SDEEM(self) def build_prior(self): pf =", "t: current time (used if diffusion depends on time) Returns: A tensor of", "dtype=float_type) * self.jitter else: Kzz = kern.K(Z) + tf.eye(M*D, dtype=float_type) * self.jitter return", "return path def __str__(self): return super().__str__() + self.diffus.__str__() class BrownianMotion: def __init__(self,sf0,ell0,Z0,U0,whiten=False,summ=False, fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False):", "def __init__(self,sf0,ell0,Z0,U0,whiten=False,summ=False, fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False): with tf.name_scope('Brownian'): Zg = Param(Z0, name = \"Z\", summ =", "initial signal variance kern: Kernel object for GP interpolation jitter: Float of jitter", "self.jitter return Kzz def U(self): U = self.U if self.whiten: Lz = tf.cholesky(self.Kzz())", "return f def build_prior(self): if self.kern.ktype == \"id\" or self.kern.ktype == \"kr\": if", "kern.ktype == \"id\" and not kern.ktype == \"kr\" : f = tf.reshape(f,[N,D]) return", "= fix_Z) U = Param(U0, name = \"U\", summ = False, fixed =", "np.asarray(x0,dtype=np.float64).reshape((1,-1)) t = [t] path = self.integrator.forward(x0,t,Nw) path = path[0] return path def", "= np.asarray(x0,dtype=np.float64).reshape((1,-1)) t = [t] integrator = ODERK4(self) path = integrator.forward(x0,t) path =", "def f(self,X,t=[0]): \"\"\" Implements GP interpolation to compute the value of the differential", "Tensorflow summary whiten: Boolean. Currently we perform the optimization only in the white", "predict(self,x0,t): \"\"\" Computes the integral and returns the path Args: x0: Python/numpy array", "Kzz = kern.K(Zg) + tf.eye(M*D, dtype=float_type) * self.jitter Lz = tf.cholesky(Kzz) Kzx =", "at Returns: Tensor of size [Nw,len(t),len(x0)] storing samples \"\"\" # returns (Nw, len(t),", "[t] path = self.integrator.forward(x0,t,Nw) path = path[0] return path def __str__(self): return super().__str__()", "import ODERK4, SDEEM from kernels import OperatorKernel from gpflow import transforms from param", "fix_U def f(self,X,t=[0]): \"\"\" Implements GP interpolation to compute the value of the", "Float of jitter level summ: Boolean for Tensorflow summary whiten: Boolean. 
Currently we", "self.diffus.g(ts=ts,Nw=Nw) def forward(self,x0,ts,Nw=1): return self.integrator.forward(x0=x0,ts=ts,Nw=Nw) def sample(self,x0,t,Nw): \"\"\" Draws random samples from a", "= tf.add_n([mvn.log_prob(self.U[:,d]) for d in range(self.kern.ndims)]) else: if self.whiten: mvn = tfd.MultivariateNormalDiag( loc=tf.zeros_like(self.U))", "points the integral is evaluated at Returns: ODE solution computed at t, tensor", "self.fix_Z = fix_Z self.fix_sn = fix_sn self.fix_U = fix_U def f(self,X,t=[0]): \"\"\" Implements", "matrix of initial inducing vectors of size MxD, M being the number of", "for initial signal variance kern: Kernel object for GP interpolation diffus: BrownianMotion object", "sn() self.D = U.shape[1] self.integrator = ODERK4(self) self.fix_Z = fix_Z self.fix_sn = fix_sn", "\"id\" and not kern.ktype == \"kr\" : f = tf.reshape(f,[N,D]) return f def", "Zg = Param(Z0, name = \"Z\", summ = False, fixed = fix_Z) Ug", "not self.whiten: A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False) f = tf.matmul(A, U, transpose_a=True) #", "of locations. Returns: TxD tensor of differential function (GP conditional) computed on input", "Param(Z0, name = \"Z\", summ = False, fixed = fix_Z) U = Param(U0,", "jitter0 = 1e-6 class NPODE: def __init__(self,Z0,U0,sn0,kern,jitter=jitter0, summ=False,whiten=True,fix_Z=False,fix_U=False,fix_sn=False): \"\"\" Constructor for the NPODE", "Z = Param(Z0, name = \"Z\", summ = False, fixed = fix_Z) U", "= \"sn\", summ = summ, fixed = fix_sn, transform = transforms.Log1pe()) self.Z =", "- rbf\" kernel if not kern.ktype == \"id\" and not kern.ktype == \"kr\"", "covariance_matrix=self.kern.K(self.Z,self.Z)) probs = tf.add_n([mvn.log_prob(self.U[:,d]) for d in range(self.kern.ndims)]) else: if self.whiten: mvn =", "pg = self.diffus.build_prior() return pf + pg def g(self,ts,Nw=1): return self.diffus.g(ts=ts,Nw=Nw) def forward(self,x0,ts,Nw=1):", "whiten self.fix_Z = fix_Z self.fix_U = fix_U def g(self,X,t): \"\"\" generates state dependent", "Boolean. 
Currently we perform the optimization only in the white domain fix_Z: Boolean", "param import Param float_type = tf.float64 jitter0 = 1e-6 class NPODE: def __init__(self,Z0,U0,sn0,kern,jitter=jitter0,", "Returns: TxD tensor of differential function (GP conditional) computed on input locations \"\"\"", "np import tensorflow as tf import tensorflow.contrib.distributions as tfd from integrators import ODERK4,", "dtype=float_type) * self.jitter else: Kzz = kern.K(Z) + tf.eye(M*D, dtype=float_type) * self.jitter Lz", "return pf + pg def g(self,ts,Nw=1): return self.diffus.g(ts=ts,Nw=Nw) def forward(self,x0,ts,Nw=1): return self.integrator.forward(x0=x0,ts=ts,Nw=Nw) def", "\\ '\\ndiff lengthscales: ' + str(self.kern.ell.eval()) return rep def build_prior(self): if self.whiten: mvn", "the white domain summ: Boolean for Tensorflow summary fix_Z: Boolean - whether inducing", "learned SDE system Args: Nw: Integer number of samples x0: Python/numpy array of", "variance: ' + str(self.sn.eval()) + \\ '\\nsignal variance: ' + str(self.kern.sf.eval()) + \\", "perform the optimization only in the white domain fix_Z: Boolean - whether inducing", "NotImplementedError() M = tf.shape(Zg)[0] D = tf.shape(X)[1] if kern.ktype == \"id\": Kzz =", "Lz = tf.cholesky(Kzz) Kzx = kern.K(Z, X) A = tf.matrix_triangular_solve(Lz, Kzx, lower=True) if", "of time points the integral is evaluated at Returns: ODE solution computed at", "def __str__(self): return super().__str__() + self.diffus.__str__() class BrownianMotion: def __init__(self,sf0,ell0,Z0,U0,whiten=False,summ=False, fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False): with tf.name_scope('Brownian'):", "kern.K(Zg) + tf.eye(M, dtype=float_type) * self.jitter else: Kzz = kern.K(Zg) + tf.eye(M*D, dtype=float_type)", "Kernel object for GP interpolation diffus: BrownianMotion object for diffusion GP interpolation s:", "size 1xD for initial signal variance kern: Kernel object for GP interpolation jitter:", "ODERK4, SDEEM from kernels import OperatorKernel from gpflow import transforms from param import", "self.D = U.shape[1] self.integrator = ODERK4(self) self.fix_Z = fix_Z self.fix_sn = fix_sn self.fix_U", "= self.Z M = tf.shape(Z)[0] D = tf.shape(Z)[1] # dim of state if", "of initial inducing vectors of size MxD, M being the number of inducing", "for Tensorflow summary fix_Z: Boolean - whether inducing locations are fixed or optimized", "NPODE: def __init__(self,Z0,U0,sn0,kern,jitter=jitter0, summ=False,whiten=True,fix_Z=False,fix_U=False,fix_sn=False): \"\"\" Constructor for the NPODE model Args: Z0: Numpy", "= Param(Z0, name = \"Z\", summ = False, fixed = fix_Z) U =", "Returns: ODE solution computed at t, tensor of size [len(t),len(x0)] \"\"\" x0 =", "Computes the integral and returns the path Args: x0: Python/numpy array of initial", "import tensorflow as tf import tensorflow.contrib.distributions as tfd from integrators import ODERK4, SDEEM", "jitter level summ: Boolean for Tensorflow summary whiten: Boolean. 
Currently we perform the", "False, fixed = fix_U) sn = Param(np.array(sn0), name = \"sn\", summ = summ,", "np.asarray(x0,dtype=np.float64).reshape((1,-1)) t = [t] integrator = ODERK4(self) path = integrator.forward(x0,t) path = path[0]", "of size [len(t),len(x0)] \"\"\" x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1)) t = [t] integrator = ODERK4(self)", "'noise variance: ' + str(self.sn.eval()) + \\ '\\nsignal variance: ' + str(self.kern.sf.eval()) +", "= tf.matrix_triangular_solve(Lz, Kzx, lower=True) if not self.whiten: A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False) g", "Ug = self.Ug Zg = self.Zg kern = self.kern if not kern.ktype ==", "if not kern.ktype == \"id\" and not kern.ktype == \"kr\" : f =", "return super().__str__() + self.diffus.__str__() class BrownianMotion: def __init__(self,sf0,ell0,Z0,U0,whiten=False,summ=False, fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False): with tf.name_scope('Brownian'): Zg =", "Nw: Integer number of samples x0: Python/numpy array of initial value t: Python/numpy", "tf.name_scope(\"NPDE\"): Z = Param(Z0, name = \"Z\", summ = False, fixed = fix_Z)", "= tfd.MultivariateNormalDiag( loc=tf.zeros_like(self.U)) else: mvn = tfd.MultivariateNormalFullCovariance( loc=tf.zeros_like(self.U), covariance_matrix=self.kern.K(self.Z,self.Z)) probs = tf.reduce_sum(mvn.log_prob(tf.squeeze(self.U))) return", "tensorflow.contrib.distributions as tfd from integrators import ODERK4, SDEEM from kernels import OperatorKernel from", "the number of inducing points. U0: Numpy matrix of initial inducing vectors of", "denser the integration points are jitter: Float of jitter level summ: Boolean for", "= kern.K(Z) + tf.eye(M*D, dtype=float_type) * self.jitter Lz = tf.cholesky(Kzz) Kzx = kern.K(Z,", "tf.random_normal(tf.shape(X),dtype=float_type) return g*dw def __str__(self): rep = '\\ndiff signal variance: ' + str(self.kern.sf.eval())", "variance kern: Kernel object for GP interpolation diffus: BrownianMotion object for diffusion GP", "optimized fix_sn: Boolean - whether noise variance is fixed or optimized \"\"\" super().__init__(Z0,U0,sn0,kern,jitter=jitter,", "SDEEM(self) def build_prior(self): pf = super().build_prior() pg = self.diffus.build_prior() return pf + pg", "forward(self,x0,ts,Nw=1): return self.integrator.forward(x0=x0,ts=ts,Nw=Nw) def sample(self,x0,t,Nw): \"\"\" Draws random samples from a learned SDE", "domain fix_Z: Boolean - whether inducing locations are fixed or optimized fix_U: Boolean", "path = path[0] return path def __str__(self): return super().__str__() + self.diffus.__str__() class BrownianMotion:", "size [len(t),len(x0)] \"\"\" x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1)) t = [t] integrator = ODERK4(self) path", "GP interpolation jitter: Float of jitter level whiten: Boolean. Currently we perform the", "jitter with tf.name_scope(\"NPDE\"): Z = Param(Z0, name = \"Z\", summ = False, fixed", "U = Param(U0, name = \"U\", summ = False, fixed = fix_U) sn", "transforms.Log1pe()) self.Z = Z() self.U = U() self.sn = sn() self.D = U.shape[1]", "jitter: Float of jitter level whiten: Boolean. 
Currently we perform the optimization only", "+ \\ '\\nsignal variance: ' + str(self.kern.sf.eval()) + \\ '\\nlengthscales: ' + str(self.kern.ell.eval())", "\"kr\": if self.whiten: mvn = tfd.MultivariateNormalDiag( loc=tf.zeros_like(self.U[:,0])) else: mvn = tfd.MultivariateNormalFullCovariance( loc=tf.zeros_like(self.U[:,0]), covariance_matrix=self.kern.K(self.Z,self.Z))", "kern = self.kern Z = self.Z M = tf.shape(Z)[0] D = tf.shape(Z)[1] #", "= tfd.MultivariateNormalFullCovariance( loc=tf.zeros_like(self.U[:,0]), covariance_matrix=self.kern.K(self.Z,self.Z)) probs = tf.add_n([mvn.log_prob(self.U[:,d]) for d in range(self.kern.ndims)]) else: if", "brownian motion Args: X: current states (in rows) t: current time (used if", "fix_U) sn = Param(np.array(sn0), name = \"sn\", summ = summ, fixed = fix_sn,", "= kern.K(Zg, X) A = tf.matrix_triangular_solve(Lz, Kzx, lower=True) if not self.whiten: A =", "\"\"\" self.name = 'npode' self.whiten = whiten self.kern = kern self.jitter = jitter", "the path Args: x0: Python/numpy array of initial value t: Python/numpy array of", "covariance_matrix=self.kern.K(self.Z,self.Z)) probs = tf.reduce_sum(mvn.log_prob(tf.squeeze(self.U))) return probs def forward(self,x0,ts): return self.integrator.forward(x0=x0,ts=ts) def predict(self,x0,t): \"\"\"", "name = \"Z\", summ = False, fixed = fix_Z) U = Param(U0, name", "self.diffus.__str__() class BrownianMotion: def __init__(self,sf0,ell0,Z0,U0,whiten=False,summ=False, fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False): with tf.name_scope('Brownian'): Zg = Param(Z0, name =", "= 1e-6 self.whiten = whiten self.fix_Z = fix_Z self.fix_U = fix_U def g(self,X,t):", "is evaluated at Returns: Tensor of size [Nw,len(t),len(x0)] storing samples \"\"\" # returns", "build_prior(self): pf = super().build_prior() pg = self.diffus.build_prior() return pf + pg def g(self,ts,Nw=1):", "def build_prior(self): pf = super().build_prior() pg = self.diffus.build_prior() return pf + pg def", "def sample(self,x0,t,Nw): \"\"\" Draws random samples from a learned SDE system Args: Nw:", "= False, fixed = fix_U) self.kern = OperatorKernel(sf0=sf0, ell0=ell0, ktype=\"id\", name='Kernel', summ=summ, fix_ell=fix_ell,", "Args: Nw: Integer number of samples x0: Python/numpy array of initial value t:", "= tf.shape(X)[1] if kern.ktype == \"id\": Kzz = kern.K(Zg) + tf.eye(M, dtype=float_type) *", "1xD for initial signal variance kern: Kernel object for GP interpolation jitter: Float", "for GP interpolation diffus: BrownianMotion object for diffusion GP interpolation s: Integer parameterizing", "storing samples \"\"\" # returns (Nw, len(t), D) x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1)) t =", "summ=summ, fix_ell=fix_ell, fix_sf=fix_sf) self.Zg = Zg() self.Ug = Ug() self.jitter = 1e-6 self.whiten", "transformation for \"id - rbf\" kernel if not kern.ktype == \"id\" and not", "return path def Kzz(self): kern = self.kern Z = self.Z M = tf.shape(Z)[0]", "of jitter level summ: Boolean for Tensorflow summary whiten: Boolean. 
Currently we perform", "self.kern = kern self.jitter = jitter with tf.name_scope(\"NPDE\"): Z = Param(Z0, name =", "fixed or optimized \"\"\" super().__init__(Z0,U0,sn0,kern,jitter=jitter, summ=summ,whiten=whiten,fix_Z=fix_Z,fix_U=fix_U,fix_sn=fix_sn) self.name = 'npsde' self.diffus = diffus self.integrator", "the optimization only in the white domain summ: Boolean for Tensorflow summary fix_Z:", "lower=True) if not self.whiten: A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False) f = tf.matmul(A, U,", "g*dw def __str__(self): rep = '\\ndiff signal variance: ' + str(self.kern.sf.eval()) + \\", "tf.matmul(A, Ug, transpose_a=True) dw = tf.random_normal(tf.shape(X),dtype=float_type) return g*dw def __str__(self): rep = '\\ndiff", "len(t), D) x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1)) t = [t] path = self.integrator.forward(x0,t,Nw) path =", "signal variance kern: Kernel object for GP interpolation jitter: Float of jitter level", "+ str(self.kern.sf.eval()) + \\ '\\nlengthscales: ' + str(self.kern.ell.eval()) return rep class NPSDE(NPODE): def", "Kzz = kern.K(Z) + tf.eye(M*D, dtype=float_type) * self.jitter return Kzz def U(self): U", "fix_sf=fix_sf) self.Zg = Zg() self.Ug = Ug() self.jitter = 1e-6 self.whiten = whiten", "optimized fix_U: Boolean - whether inducing vectors are fixed or optimized fix_sn: Boolean", "if self.whiten: Lz = tf.cholesky(self.Kzz()) U = tf.matmul(Lz,U) return U def __str__(self): rep", "= transforms.Log1pe()) self.Z = Z() self.U = U() self.sn = sn() self.D =", "at location(s) X. Args: X: TxD tensor of input locations, T is the", "self.jitter Lz = tf.cholesky(Kzz) Kzx = kern.K(Z, X) A = tf.matrix_triangular_solve(Lz, Kzx, lower=True)", "samples x0: Python/numpy array of initial value t: Python/numpy array of time points", "ODERK4(self) path = integrator.forward(x0,t) path = path[0] return path def Kzz(self): kern =", "of size MxD, M being the number of inducing points. 
U0: Numpy matrix", "name = \"Z\", summ = False, fixed = fix_Z) Ug = Param(U0, name", "* self.jitter else: Kzz = kern.K(Zg) + tf.eye(M*D, dtype=float_type) * self.jitter Lz =", "tfd from integrators import ODERK4, SDEEM from kernels import OperatorKernel from gpflow import", "U = tf.matmul(Lz,U) return U def __str__(self): rep = 'noise variance: ' +", "Integer number of samples x0: Python/numpy array of initial value t: Python/numpy array", "return self.integrator.forward(x0=x0,ts=ts) def predict(self,x0,t): \"\"\" Computes the integral and returns the path Args:", "forward(self,x0,ts): return self.integrator.forward(x0=x0,ts=ts) def predict(self,x0,t): \"\"\" Computes the integral and returns the path", "size 1xD for initial signal variance kern: Kernel object for GP interpolation diffus:", "for diffusion GP interpolation s: Integer parameterizing how denser the integration points are", "+ tf.eye(M, dtype=float_type) * self.jitter else: Kzz = kern.K(Z) + tf.eye(M*D, dtype=float_type) *", "def build_prior(self): if self.whiten: mvn = tfd.MultivariateNormalDiag( loc=tf.zeros_like(self.Ug)) else: mvn = tfd.MultivariateNormalFullCovariance( loc=tf.zeros_like(self.Ug),", "on time) Returns: A tensor of the same shape as X \"\"\" Ug", "dtype=float_type) * self.jitter return Kzz def U(self): U = self.U if self.whiten: Lz", "str(self.kern.ell.eval()) return rep class NPSDE(NPODE): def __init__(self,Z0,U0,sn0,kern,diffus,s=1,jitter=jitter0, summ=False,whiten=True,fix_Z=False,fix_U=False,fix_sn=False): \"\"\" Constructor for the NPSDE", "pf + pg def g(self,ts,Nw=1): return self.diffus.g(ts=ts,Nw=Nw) def forward(self,x0,ts,Nw=1): return self.integrator.forward(x0=x0,ts=ts,Nw=Nw) def sample(self,x0,t,Nw):", "summ = False, fixed = fix_U) self.kern = OperatorKernel(sf0=sf0, ell0=ell0, ktype=\"id\", name='Kernel', summ=summ,", "\"\"\" Constructor for the NPODE model Args: Z0: Numpy matrix of initial inducing", "Z0: Numpy matrix of initial inducing points of size MxD, M being the", "of samples x0: Python/numpy array of initial value t: Python/numpy array of time", "for the NPODE model Args: Z0: Numpy matrix of initial inducing points of", "level whiten: Boolean. Currently we perform the optimization only in the white domain", "== \"id\" and not kern.ktype == \"kr\" : f = tf.reshape(f,[N,D]) return f", "fixed or optimized fix_U: Boolean - whether inducing vectors are fixed or optimized", "= ODERK4(self) path = integrator.forward(x0,t) path = path[0] return path def Kzz(self): kern", "solution computed at t, tensor of size [len(t),len(x0)] \"\"\" x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1)) t" ]
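# ---------------------------------------------------------------------------
# Usage sketch (hedged, illustrative): one minimal way to wire the classes
# above together, assuming OperatorKernel, Param, ODERK4 and SDEEM behave as
# referenced above and that omitted kwargs have defaults. All shapes and
# numeric values below are placeholder assumptions, not values from the source.
#
#   import numpy as np
#   Z0 = np.random.randn(10, 2)              # 10 inducing locations, 2-D state
#   U0 = np.zeros((10, 2))                   # initial inducing vectors
#   kern = OperatorKernel(sf0=1.0, ell0=np.ones(2), ktype="id", name="Kernel")
#   diffus = BrownianMotion(sf0=1.0, ell0=np.ones(2), Z0=Z0, U0=np.ones((10, 1)))
#   sde = NPSDE(Z0, U0, sn0=[0.5, 0.5], kern=kern, diffus=diffus)
#   paths = sde.sample(x0=[0.0, 0.0], t=np.linspace(0, 10, 50), Nw=20)
#   # `paths` is a [Nw, len(t), D] tensor; evaluate it inside a tf.Session.
# ---------------------------------------------------------------------------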
import time
from random import randrange

import grequests

from NodeState import NodeState
from client import Client
from cluster import HEART_BEAT_INTERVAL, ELECTION_TIMEOUT_MAX
import logging
from monitor import send_state_update, send_heartbeat

logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s',
                    datefmt='%H:%M:%S', level=logging.INFO)


class Leader(NodeState):
    def __init__(self, candidate):
        super(Leader, self).__init__(candidate.node)
        self.current_term = candidate.current_term
        self.commit_index = candidate.commit_index
        self.last_applied_index = candidate.last_applied_index
        self.entries = candidate.entries
        self.stopped = False
        self.followers = [peer for peer in self.cluster if peer != self.node]
        # randrange requires integer bounds; ELECTION_TIMEOUT_MAX / 2 would
        # pass a float and raise a TypeError, so use floor division
        self.election_timeout = float(randrange(ELECTION_TIMEOUT_MAX // 2, ELECTION_TIMEOUT_MAX))

    def heartbeat(self):
        # Broadcast heartbeats to every follower until the leader is stopped.
        while not self.stopped:
            logging.info(f'{self} send heartbeat to followers')
            logging.info('========================================================================')
            send_heartbeat(self, HEART_BEAT_INTERVAL)
            client = Client()
            with client as session:
                posts = [
                    grequests.post(f'http://{peer.uri}/raft/heartbeat', json=self.node, session=session)
                    for peer in self.followers
                ]
                for response in grequests.map(posts, gtimeout=HEART_BEAT_INTERVAL):
                    if response is not None:
                        logging.info(f'{self} got heartbeat from follower: {response.json()}')
                    else:
                        logging.info(f'{self} got heartbeat from follower: None')
            logging.info('========================================================================')
            time.sleep(HEART_BEAT_INTERVAL)

    def __repr__(self):
        return f'{type(self).__name__, self.node.id, self.current_term}'
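# ---------------------------------------------------------------------------
# Usage sketch (hedged, illustrative): a Leader is built from the Candidate
# that just won an election, and the heartbeat loop is assumed to run on its
# own thread so the node can keep serving requests. `candidate` is assumed to
# carry the node/cluster/term state accessed above.
#
#   import threading
#   leader = Leader(candidate)
#   threading.Thread(target=leader.heartbeat, daemon=True).start()
#   ...
#   leader.stopped = True    # the loop exits on its next iteration
# ---------------------------------------------------------------------------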
import urllib.request
from bs4 import BeautifulSoup


def getDOBBoilerData( boroNum, houseNum, houseStreet ):
    url = requestToDOBUrl( boroNum, houseNum, houseStreet )
    soup = urlToSoup( url )
    if hasDOBData( soup ):
        return extractDOBDataFromSoup( soup )
    else:
        return "Invalid Query"

def requestToDOBUrl( boroNum, houseNum, houseStreet ):
    return ("http://a810-bisweb.nyc.gov/bisweb/PropertyProfileOverviewServlet"
            + "?boro=" + str(boroNum)
            + "&houseno=" + str(houseNum)
            + "&street=" + houseStreet.replace(' ','+'))

def urlToSoup( url ):
    "Takes in URL and returns a soup object of the contents."
    webpage = urllib.request.urlopen( url )
    soup = BeautifulSoup( webpage.read(), "html.parser" )
    # soup.unicode
    return soup

def hasDOBData( soup ):
    "Checks to see whether DOB data exist for a given application number."
    tables = soup.find_all("table")
    return tables[1].get_text().find("NO RECORD") == -1

def extractDOBDataFromSoup( soup ):
    """ Takes in data structure from BeautifulSoup and parses for DOB Boiler Data.
    We assume that the soup has been prescreened to ensure that data exist.
    """
    allUrls = soup.find_all('a')
    #get the url with the reference to the "BoilerComplianceQueryServlet".
    #There should be exactly one such url.
    for i in allUrls:
        if i['href'].find("BoilerComplianceQueryServlet") != -1:
            url = "http://a810-bisweb.nyc.gov/bisweb/" + i['href']
    soup2 = urlToSoup(url)
    boilerTables = soup2.find_all('table')
    records = list()
    for row in boilerTables[3].find_all('tr'): #grab the table with boiler data
        records.append(row.get_text().strip('\n').split('\n'))
    return records
\"\"\" allUrls = soup.find_all('a') #get the url with the reference to the", "= BeautifulSoup( webpage.read(), \"html.parser\" ) # soup.unicode return soup def hasDOBData( soup ):", "tables = soup.find_all(\"table\") return tables[1].get_text().find(\"NO RECORD\") == -1 def extractDOBDataFromSoup( soup ): \"\"\"", "#get the url with the reference to the \"BoilerComplianceQueryServlet\". #There should be exactly", "a soup object of the contents.\" webpage = urllib.request.urlopen( url ) soup =", "in data structure from BeautifulSoup and parses for DOB Boiler Data. We assume", "urlToSoup( url ) if hasDOBData( soup ): return extractDOBDataFromSoup( soup ) else: return", "to see whether DEP data exist for a given application number.\" tables =", "boroNum, houseNum, houseStreet ) soup = urlToSoup( url ) if hasDOBData( soup ):", "houseNum, houseStreet ): url = requestToDOBUrl( boroNum, houseNum, houseStreet ) soup = urlToSoup(", "hasDOBData( soup ): return extractDOBDataFromSoup( soup ) else: return \"Invalid Query\" def requestToDOBUrl(", "contents.\" webpage = urllib.request.urlopen( url ) soup = BeautifulSoup( webpage.read(), \"html.parser\" ) #", "object of the contents.\" webpage = urllib.request.urlopen( url ) soup = BeautifulSoup( webpage.read(),", "): url = requestToDOBUrl( boroNum, houseNum, houseStreet ) soup = urlToSoup( url )", "urllib.request from bs4 import BeautifulSoup def getDOBBoilerData( boroNum, houseNum, houseStreet ): url =" ]
"""
Some methods for kinetics.
"""
import math

import numpy as np

import carla


def get_speed(vehicle):
    """
    Get speed, considering only the 2D velocity.
    """
    vel = vehicle.get_velocity()
    return math.sqrt(vel.x ** 2 + vel.y ** 2)  # + vel.z ** 2


def set_vehicle_speed(vehicle, speed: float):
    """
    Set the vehicle to a target speed. The velocity vector coincides with the
    vehicle x-axis.

    :param speed: target speed in m/s
    """
    # set an initial speed for the ego vehicle
    transform = vehicle.get_transform()
    # transform matrix from the actor coord system to the world system
    trans_matrix = get_transform_matrix(transform)  # actor2world
    # target velocity in the local coordinate system, in m/s
    target_vel = np.array([[speed], [0.], [0.]])
    # target velocity in the world coordinate system
    target_vel_world = np.dot(trans_matrix, target_vel)
    target_vel_world = np.squeeze(target_vel_world)
    # as a carla.Vector3D
    target_velocity = carla.Vector3D(
        x=target_vel_world[0],
        y=target_vel_world[1],
        z=target_vel_world[2],
    )
    vehicle.set_target_velocity(target_velocity)
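
# A minimal, hypothetical usage sketch (not part of the original module):
# connect to a CARLA server on localhost:2000, grab the first vehicle actor
# and request 10 m/s along its heading. Host, port and the filter pattern
# are assumptions.
def _demo_set_speed(host="localhost", port=2000):
    client = carla.Client(host, port)
    world = client.get_world()
    vehicle = world.get_actors().filter("vehicle.*")[0]
    set_vehicle_speed(vehicle, 10.0)
    return get_speed(vehicle)  # planar speed in m/s, should approach 10.0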
def angle_reg(angle):
    """
    Regularize an angle into a certain bound; the default range is [-pi, pi].
    """
    while True:
        if -np.pi <= angle <= np.pi:
            return angle
        if angle < -np.pi:
            angle += 2 * np.pi
        else:
            angle -= 2 * np.pi


def get_transform_matrix(transform: carla.Transform):
    """
    Get and parse a transformation matrix from a transform.
    The matrix maps the Actor coord system to the world coord system.

    :param transform:
    :return trans_matrix: rotation part of the transform matrix as an ndarray
    """
    # original transform matrix as a nested list
    _T = transform.get_matrix()
    # rotation block: Actor system to world system
    trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]],
                             [_T[1][0], _T[1][1], _T[1][2]],
                             [_T[2][0], _T[2][1], _T[2][2]]])
    return trans_matrix
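
# A quick sanity check for angle_reg (hypothetical, not in the original
# module): 3*pi/2 wraps down by 2*pi to -pi/2, and -3*pi wraps up to -pi.
def _check_angle_reg():
    assert abs(angle_reg(3 * np.pi / 2) + np.pi / 2) < 1e-9
    assert abs(angle_reg(-3 * np.pi) + np.pi) < 1e-9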
def get_inverse_transform_matrix(transform: carla.Transform):
    """
    Get the inverse transform matrix from a transform class.
    The inverse transform maps the world coord system to the actor coord system.
    """
    _T = transform.get_inverse_matrix()
    # rotation block: world system to Actor system
    inverse_trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]],
                                     [_T[1][0], _T[1][1], _T[1][2]],
                                     [_T[2][0], _T[2][1], _T[2][2]]])
    return inverse_trans_matrix


def vector2array(vector: carla.Vector3D):
    """
    Transform a carla.Vector3D instance into an ndarray.
    """
    array = np.array([vector.x, vector.y, vector.z])
    return array


def get_vehicle_kinetic(vehicle: carla.Vehicle):
    """
    todo: unfinished; use a class to encapsulate all methods about getting kinetics

    Get kinetics of the ego vehicle.
    """
    kinetic_dict = {}
    transform = vehicle.get_transform()
    vehicle.get_acceleration()
    vehicle.get_angular_velocity()
\"\"\" _T = transform.get_inverse_matrix() # transform matrix from Actor", "break wp = wp.get_right_lane() if distance_squared < interval_length_squared: # The location could be", "\"\"\" Get and parse a transformation matrix by transform. Matrix is from Actor", "if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id): break if new_distance_squared < distance_squared: distance_squared = new_distance_squared location", "bound. default range is [-pi, pi] \"\"\" while True: if -np.pi <= angle", "transformation matrix by transform. Matrix is from Actor coord system to the world", "np.sign(curr_wp.lane_id)): # The location is within the current route interval covered_distance += math.sqrt(distance_squared)", "compare orientations, however, this also does not work for # long route intervals", "target speed. Velocity vector coincide vehicle x-axis. :param:speed in m/s \"\"\" # set", "def angle_reg(angle): \"\"\" Regularize angle into certain bound. default range is [-pi, pi]", "from world coord system to actor coord system. \"\"\" _T = transform.get_inverse_matrix() #", "= trans.location location = target_location_from_wp # Don't perform any calculations for the first", "system. \"\"\" _T = transform.get_inverse_matrix() # transform matrix from Actor system to world", "y=target_vel_world[1], z=target_vel_world[2], ) # vehicle.set_target_velocity(target_velocity) def angle_reg(angle): \"\"\" Regularize angle into certain bound.", "np.squeeze(target_vel_world) # in carla.Vector3D target_velocity = carla.Vector3D( x=target_vel_world[0], y=target_vel_world[1], z=target_vel_world[2], ) # vehicle.set_target_velocity(target_velocity)", "== curr_wp.road_id: # Roads match, now compare the sign of the lane ids", "** 2) # Close to the current position? Stop calculation if distance_squared <", "get_inverse_transform_matrix(transform: carla.Transform): \"\"\" Get inverse transform matrix from a transform class. Inverse transform", "[_T[1][0], _T[1][1], _T[1][2]], [_T[2][0], _T[2][1], _T[2][2]]]) return trans_matrix def get_inverse_transform_matrix(transform: carla.Transform): \"\"\" Get", "def get_inverse_transform_matrix(transform: carla.Transform): \"\"\" Get inverse transform matrix from a transform class. Inverse", "\"\"\" array = np.array([vector.x, vector.y, vector.z]) return array def get_vehicle_kinetic(vehicle: carla.Vehicle): \"\"\" todo", "_T[2][1], _T[2][2]]]) return inverse_trans_matrix def vector2array(vector: carla.Vector3D): \"\"\" Transform carla.Vector3D instance to ndarray", "{} transform = vehicle.get_transform() vehicle.get_acceleration() vehicle.get_angular_velocity() def get_distance_along_route(wmap, route, target_location): \"\"\" Calculate the", "the computation load is too high starting_wp = wmap.get_waypoint(location) wp = starting_wp.get_left_lane() while", ":param route: list of tuples, (carla.Transform, RoadOption) :param target_location: \"\"\" covered_distance = 0", "target_vel_world = np.dot(trans_matrix, target_vel) target_vel_world = np.squeeze(target_vel_world) # in carla.Vector3D target_velocity = carla.Vector3D(", "getting kinetics \"\"\" kinetic_dict = {} transform = vehicle.get_transform() vehicle.get_acceleration() vehicle.get_angular_velocity() def get_distance_along_route(wmap,", "into certain bound. 
default range is [-pi, pi] \"\"\" while True: if -np.pi", "computation load is too high starting_wp = wmap.get_waypoint(location) wp = starting_wp.get_left_lane() while wp", "be returned :param wmap: carla.Map of current world :param route: list of tuples,", "# original trans matrix in list _T = transform.get_matrix() # transform matrix from", "(carla.Transform, RoadOption) :param target_location: \"\"\" covered_distance = 0 prev_position = None found =", "from actor coord system to world system trans_matrix = get_transform_matrix(transform) # actor2world #", "in carla.Vector3D target_velocity = carla.Vector3D( x=target_vel_world[0], y=target_vel_world[1], z=target_vel_world[2], ) # vehicle.set_target_velocity(target_velocity) def angle_reg(angle):", "= wmap.get_waypoint(target_location).transform.location for trans, _ in route: # input route is transform position", "distance_squared < interval_length_squared: # The location could be inside the current route interval,", "def get_speed(vehicle): \"\"\" Get speed consider only 2D velocity. \"\"\" vel = vehicle.get_velocity()", "carla.Vector3D target_velocity = carla.Vector3D( x=target_vel_world[0], y=target_vel_world[1], z=target_vel_world[2], ) # vehicle.set_target_velocity(target_velocity) def angle_reg(angle): \"\"\"", "to correct route interval, otherwise the computation load is too high starting_wp =", "transform matrix from Actor system to world system trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]],", "system. :param transform: :return trans_matrix: transform matrix in ndarray \"\"\" # original trans", "the lane ids if (np.sign(wp.lane_id) == np.sign(prev_wp.lane_id) or np.sign(wp.lane_id) == np.sign(curr_wp.lane_id)): # The", "\"\"\" Calculate the distance of the given location along the route Note: If", "and curr_wp and wp: if wp.road_id == prev_wp.road_id or wp.road_id == curr_wp.road_id: #", "= starting_wp.get_left_lane() while wp is not None: new_location = wp.transform.location new_distance_squared = ((new_location.x", "= {} transform = vehicle.get_transform() vehicle.get_acceleration() vehicle.get_angular_velocity() def get_distance_along_route(wmap, route, target_location): \"\"\" Calculate", "matrix from Actor system to world system inverse_trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0],", "wp = wmap.get_waypoint(location) if prev_wp and curr_wp and wp: if wp.road_id == prev_wp.road_id", "transform matrix from a transform class. Inverse transform refers to from world coord", "else: angle -= 2 * np.pi def get_transform_matrix(transform: carla.Transform): \"\"\" Get and parse", "ndarray \"\"\" # original trans matrix in list _T = transform.get_matrix() # transform", "match, now compare the sign of the lane ids if (np.sign(wp.lane_id) == np.sign(prev_wp.lane_id)", "+ ( (new_location.y - prev_position.y) ** 2) if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id): break if", "vel = vehicle.get_velocity() return math.sqrt(vel.x ** 2 + vel.y ** 2) # +", "The location is within the current route interval covered_distance += math.sqrt(distance_squared) found =", "not work for # long route intervals curr_wp = wmap.get_waypoint(position) prev_wp = wmap.get_waypoint(prev_position)", "if a neighbor lane is closer to the route # Do this only", "prev_position: prev_position = position continue # Calculate distance between previous and current route", "a target speed. Velocity vector coincide vehicle x-axis. 
:param:speed in m/s \"\"\" #", "system inverse_trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [_T[2][0], _T[2][1], _T[2][2]]]) return", "Get and parse a transformation matrix by transform. Matrix is from Actor coord", "velocity in local coordinate system, in m/s target_vel = np.array([[speed], [0.], [0.]]) #", "2) + ((location.y - prev_position.y) ** 2) # Close to the current position?", "only 2D velocity. \"\"\" vel = vehicle.get_velocity() return math.sqrt(vel.x ** 2 + vel.y", "first route point if not prev_position: prev_position = position continue # Calculate distance", "if distance_squared < interval_length_squared: # The location could be inside the current route", "vector.z]) return array def get_vehicle_kinetic(vehicle: carla.Vehicle): \"\"\" todo unfinished Get kinetics of ego", "while True: if -np.pi <= angle <= np.pi: return angle if angle <", "the current route interval, if route/lane ids match # Note: This assumes a", "!= np.sign(wp.lane_id): break if new_distance_squared < distance_squared: distance_squared = new_distance_squared location = new_location", "2) # + vel.z ** 2) def set_vehicle_speed(vehicle, speed: float): \"\"\" Set vehicle", "vector.y, vector.z]) return array def get_vehicle_kinetic(vehicle: carla.Vehicle): \"\"\" todo unfinished Get kinetics of", "tuples, (carla.Transform, RoadOption) :param target_location: \"\"\" covered_distance = 0 prev_position = None found", "in a close distance to correct route interval, otherwise the computation load is", "Check if a neighbor lane is closer to the route # Do this", "float): \"\"\" Set vehicle to a target speed. Velocity vector coincide vehicle x-axis.", "current route interval, if route/lane ids match # Note: This assumes a sufficiently", "# Don't use the input location, use the corresponding wp as location target_location_from_wp", "** 2) distance_squared = ((location.x - prev_position.x) ** 2) + ((location.y - prev_position.y)", "parse a transformation matrix by transform. Matrix is from Actor coord system to", "world coordinate system target_vel_world = np.dot(trans_matrix, target_vel) target_vel_world = np.squeeze(target_vel_world) # in carla.Vector3D", "= np.array([[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [_T[2][0], _T[2][1], _T[2][2]]]) return inverse_trans_matrix def", "intervals curr_wp = wmap.get_waypoint(position) prev_wp = wmap.get_waypoint(prev_position) wp = wmap.get_waypoint(location) if prev_wp and", "< 1.0: break if distance_squared < 400 and not distance_squared < interval_length_squared: #", "transform = vehicle.get_transform() vehicle.get_acceleration() vehicle.get_angular_velocity() def get_distance_along_route(wmap, route, target_location): \"\"\" Calculate the distance", ":param wmap: carla.Map of current world :param route: list of tuples, (carla.Transform, RoadOption)", "2) # Close to the current position? Stop calculation if distance_squared < 1.0:", "target_location: \"\"\" covered_distance = 0 prev_position = None found = False # Don't", "if distance_squared < 1.0: break if distance_squared < 400 and not distance_squared <", "input route is transform position = trans.location location = target_location_from_wp # Don't perform", "prev_wp.road_id or wp.road_id == curr_wp.road_id: # Roads match, now compare the sign of", "\"\"\" Regularize angle into certain bound. default range is [-pi, pi] \"\"\" while", "the world coord system. 
:param transform: :return trans_matrix: transform matrix in ndarray \"\"\"", "system trans_matrix = get_transform_matrix(transform) # actor2world # target velocity in local coordinate system,", "world coord system to actor coord system. \"\"\" _T = transform.get_inverse_matrix() # transform", "the sign of the lane ids if (np.sign(wp.lane_id) == np.sign(prev_wp.lane_id) or np.sign(wp.lane_id) ==", "This assumes a sufficiently small route interval # An alternative is to compare", "trans_matrix: transform matrix in ndarray \"\"\" # original trans matrix in list _T", "_T = transform.get_inverse_matrix() # transform matrix from Actor system to world system inverse_trans_matrix", "list _T = transform.get_matrix() # transform matrix from Actor system to world system", "speed: float): \"\"\" Set vehicle to a target speed. Velocity vector coincide vehicle", "route, target_location): \"\"\" Calculate the distance of the given location along the route", "as np import math def get_speed(vehicle): \"\"\" Get speed consider only 2D velocity.", "coord system to world system trans_matrix = get_transform_matrix(transform) # actor2world # target velocity", "carla.Vector3D): \"\"\" Transform carla.Vector3D instance to ndarray \"\"\" array = np.array([vector.x, vector.y, vector.z])", "curr_wp.road_id: # Roads match, now compare the sign of the lane ids if", "only in a close distance to correct route interval, otherwise the computation load", "target_vel_world = np.squeeze(target_vel_world) # in carla.Vector3D target_velocity = carla.Vector3D( x=target_vel_world[0], y=target_vel_world[1], z=target_vel_world[2], )", "None found = False # Don't use the input location, use the corresponding", "if wp.road_id == prev_wp.road_id or wp.road_id == curr_wp.road_id: # Roads match, now compare", "Get inverse transform matrix from a transform class. Inverse transform refers to from", "location = new_location else: break wp = wp.get_left_lane() wp = starting_wp.get_right_lane() while wp", "import carla import numpy as np import math def get_speed(vehicle): \"\"\" Get speed", "= wmap.get_waypoint(prev_position) wp = wmap.get_waypoint(location) if prev_wp and curr_wp and wp: if wp.road_id", "world coord system. :param transform: :return trans_matrix: transform matrix in ndarray \"\"\" #", "vehicle.get_transform() # transform matrix from actor coord system to world system trans_matrix =", "= wp.get_left_lane() wp = starting_wp.get_right_lane() while wp is not None: new_location = wp.transform.location", "-np.pi <= angle <= np.pi: return angle if angle < -np.pi: angle +=", "use the input location, use the corresponding wp as location target_location_from_wp = wmap.get_waypoint(target_location).transform.location", "for ego vehicle transform = vehicle.get_transform() # transform matrix from actor coord system", "about getting kinetics \"\"\" kinetic_dict = {} transform = vehicle.get_transform() vehicle.get_acceleration() vehicle.get_angular_velocity() def", "angle if angle < -np.pi: angle += 2 * np.pi else: angle -=", "return array def get_vehicle_kinetic(vehicle: carla.Vehicle): \"\"\" todo unfinished Get kinetics of ego vehicle.", "transform position = trans.location location = target_location_from_wp # Don't perform any calculations for", "class. 
Inverse transform refers to from world coord system to actor coord system.", "closer to the route # Do this only in a close distance to", "the route Note: If the location is not along the route, the route", "not None: new_location = wp.transform.location new_distance_squared = ((new_location.x - prev_position.x) ** 2) +", "the current position? Stop calculation if distance_squared < 1.0: break if distance_squared <", "position.x) ** 2) + ((prev_position.y - position.y) ** 2) distance_squared = ((location.x -", "system to world system trans_matrix = get_transform_matrix(transform) # actor2world # target velocity in", "set a initial speed for ego vehicle transform = vehicle.get_transform() # transform matrix", "((prev_position.y - position.y) ** 2) distance_squared = ((location.x - prev_position.x) ** 2) +", "math.sqrt(distance_squared) found = True break covered_distance += math.sqrt(interval_length_squared) prev_position = position return covered_distance,", "to ndarray \"\"\" array = np.array([vector.x, vector.y, vector.z]) return array def get_vehicle_kinetic(vehicle: carla.Vehicle):", "trans matrix in list _T = transform.get_matrix() # transform matrix from Actor system", "_T[1][2]], [_T[2][0], _T[2][1], _T[2][2]]]) return trans_matrix def get_inverse_transform_matrix(transform: carla.Transform): \"\"\" Get inverse transform", "lane is closer to the route # Do this only in a close", "system to world system trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [_T[2][0],", "* np.pi else: angle -= 2 * np.pi def get_transform_matrix(transform: carla.Transform): \"\"\" Get", "= position continue # Calculate distance between previous and current route point interval_length_squared", "load is too high starting_wp = wmap.get_waypoint(location) wp = starting_wp.get_left_lane() while wp is", "vehicle transform = vehicle.get_transform() # transform matrix from actor coord system to world", "= wmap.get_waypoint(location) if prev_wp and curr_wp and wp: if wp.road_id == prev_wp.road_id or", "vel.z ** 2) def set_vehicle_speed(vehicle, speed: float): \"\"\" Set vehicle to a target", "x=target_vel_world[0], y=target_vel_world[1], z=target_vel_world[2], ) # vehicle.set_target_velocity(target_velocity) def angle_reg(angle): \"\"\" Regularize angle into certain", "# transform matrix from actor coord system to world system trans_matrix = get_transform_matrix(transform)", "_T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [_T[2][0], _T[2][1], _T[2][2]]]) return inverse_trans_matrix def vector2array(vector: carla.Vector3D): \"\"\"", "If the location is not along the route, the route length will be", "RoadOption) :param target_location: \"\"\" covered_distance = 0 prev_position = None found = False", "= get_transform_matrix(transform) # actor2world # target velocity in local coordinate system, in m/s", "_T[1][1], _T[1][2]], [_T[2][0], _T[2][1], _T[2][2]]]) return inverse_trans_matrix def vector2array(vector: carla.Vector3D): \"\"\" Transform carla.Vector3D", "wp = starting_wp.get_left_lane() while wp is not None: new_location = wp.transform.location new_distance_squared =", "= np.array([[speed], [0.], [0.]]) # target velocity in world coordinate system target_vel_world =", "methods for kenetics.s \"\"\" import carla import numpy as np import math def", "along the route, the route length will be returned :param wmap: carla.Map of", "# Note: This assumes a sufficiently small route interval # An alternative is", "= wmap.get_waypoint(location) wp = starting_wp.get_left_lane() while wp 
is not None: new_location = wp.transform.location", "is from Actor coord system to the world coord system. :param transform: :return", "system to the world coord system. :param transform: :return trans_matrix: transform matrix in", "carla.Transform): \"\"\" Get inverse transform matrix from a transform class. Inverse transform refers", "- position.y) ** 2) distance_squared = ((location.x - prev_position.x) ** 2) + ((location.y", "set_vehicle_speed(vehicle, speed: float): \"\"\" Set vehicle to a target speed. Velocity vector coincide", "if route/lane ids match # Note: This assumes a sufficiently small route interval", "vehicle.set_target_velocity(target_velocity) def angle_reg(angle): \"\"\" Regularize angle into certain bound. default range is [-pi,", "\"\"\" kinetic_dict = {} transform = vehicle.get_transform() vehicle.get_acceleration() vehicle.get_angular_velocity() def get_distance_along_route(wmap, route, target_location):", "todo unfinished Get kinetics of ego vehicle. todo use a class to encapsulate", "ego vehicle. todo use a class to encapsulate all methods about getting kinetics", "Calculate distance between previous and current route point interval_length_squared = ((prev_position.x - position.x)", "this also does not work for # long route intervals curr_wp = wmap.get_waypoint(position)", "+ vel.z ** 2) def set_vehicle_speed(vehicle, speed: float): \"\"\" Set vehicle to a", "** 2) if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id): break if new_distance_squared < distance_squared: distance_squared =", "# Check if a neighbor lane is closer to the route # Do", "perform any calculations for the first route point if not prev_position: prev_position =", "= carla.Vector3D( x=target_vel_world[0], y=target_vel_world[1], z=target_vel_world[2], ) # vehicle.set_target_velocity(target_velocity) def angle_reg(angle): \"\"\" Regularize angle", "todo use a class to encapsulate all methods about getting kinetics \"\"\" kinetic_dict", "given location along the route Note: If the location is not along the", "((new_location.x - prev_position.x) ** 2) + ( (new_location.y - prev_position.y) ** 2) if", "wp.road_id == prev_wp.road_id or wp.road_id == curr_wp.road_id: # Roads match, now compare the", "interval_length_squared = ((prev_position.x - position.x) ** 2) + ((prev_position.y - position.y) ** 2)", "= transform.get_matrix() # transform matrix from Actor system to world system trans_matrix =", "# input route is transform position = trans.location location = target_location_from_wp # Don't", "\"\"\" covered_distance = 0 prev_position = None found = False # Don't use", "[-pi, pi] \"\"\" while True: if -np.pi <= angle <= np.pi: return angle", "position.y) ** 2) distance_squared = ((location.x - prev_position.x) ** 2) + ((location.y -", "is [-pi, pi] \"\"\" while True: if -np.pi <= angle <= np.pi: return", "array = np.array([vector.x, vector.y, vector.z]) return array def get_vehicle_kinetic(vehicle: carla.Vehicle): \"\"\" todo unfinished", "# Don't perform any calculations for the first route point if not prev_position:", "= ((location.x - prev_position.x) ** 2) + ((location.y - prev_position.y) ** 2) #", "actor coord system. 
\"\"\" _T = transform.get_inverse_matrix() # transform matrix from Actor system", "if (np.sign(wp.lane_id) == np.sign(prev_wp.lane_id) or np.sign(wp.lane_id) == np.sign(curr_wp.lane_id)): # The location is within", "2) distance_squared = ((location.x - prev_position.x) ** 2) + ((location.y - prev_position.y) **", "- prev_position.x) ** 2) + ( (new_location.y - prev_position.y) ** 2) if np.sign(starting_wp.lane_id)", "any calculations for the first route point if not prev_position: prev_position = position", "# in carla.Vector3D target_velocity = carla.Vector3D( x=target_vel_world[0], y=target_vel_world[1], z=target_vel_world[2], ) # vehicle.set_target_velocity(target_velocity) def", "get_speed(vehicle): \"\"\" Get speed consider only 2D velocity. \"\"\" vel = vehicle.get_velocity() return", "vehicle.get_velocity() return math.sqrt(vel.x ** 2 + vel.y ** 2) # + vel.z **", "import math def get_speed(vehicle): \"\"\" Get speed consider only 2D velocity. \"\"\" vel", "assumes a sufficiently small route interval # An alternative is to compare orientations,", "\"\"\" vel = vehicle.get_velocity() return math.sqrt(vel.x ** 2 + vel.y ** 2) #", "vehicle to a target speed. Velocity vector coincide vehicle x-axis. :param:speed in m/s", "current world :param route: list of tuples, (carla.Transform, RoadOption) :param target_location: \"\"\" covered_distance", "actor coord system to world system trans_matrix = get_transform_matrix(transform) # actor2world # target", "Some methods for kenetics.s \"\"\" import carla import numpy as np import math", "calculations for the first route point if not prev_position: prev_position = position continue", "point interval_length_squared = ((prev_position.x - position.x) ** 2) + ((prev_position.y - position.y) **", "sufficiently small route interval # An alternative is to compare orientations, however, this", "route: # input route is transform position = trans.location location = target_location_from_wp #", "[_T[2][0], _T[2][1], _T[2][2]]]) return inverse_trans_matrix def vector2array(vector: carla.Vector3D): \"\"\" Transform carla.Vector3D instance to", "wp.transform.location new_distance_squared = ((new_location.x - prev_position.x) ** 2) + ( (new_location.y - prev_position.y)", "Note: This assumes a sufficiently small route interval # An alternative is to", "sign of the lane ids if (np.sign(wp.lane_id) == np.sign(prev_wp.lane_id) or np.sign(wp.lane_id) == np.sign(curr_wp.lane_id)):", "is within the current route interval covered_distance += math.sqrt(distance_squared) found = True break", "the current route interval covered_distance += math.sqrt(distance_squared) found = True break covered_distance +=", "# Roads match, now compare the sign of the lane ids if (np.sign(wp.lane_id)", "initial speed for ego vehicle transform = vehicle.get_transform() # transform matrix from actor", "inverse transform matrix from a transform class. 
Inverse transform refers to from world", "target velocity in local coordinate system, in m/s target_vel = np.array([[speed], [0.], [0.]])", ":return trans_matrix: transform matrix in ndarray \"\"\" # original trans matrix in list", "= wp.transform.location new_distance_squared = ((new_location.x - prev_position.x) ** 2) + ( (new_location.y -", "target_location_from_wp # Don't perform any calculations for the first route point if not", "new_distance_squared location = new_location else: break wp = wp.get_left_lane() wp = starting_wp.get_right_lane() while", "starting_wp = wmap.get_waypoint(location) wp = starting_wp.get_left_lane() while wp is not None: new_location =", "a class to encapsulate all methods about getting kinetics \"\"\" kinetic_dict = {}", "work for # long route intervals curr_wp = wmap.get_waypoint(position) prev_wp = wmap.get_waypoint(prev_position) wp", "from a transform class. Inverse transform refers to from world coord system to", "coord system to actor coord system. \"\"\" _T = transform.get_inverse_matrix() # transform matrix", "* np.pi def get_transform_matrix(transform: carla.Transform): \"\"\" Get and parse a transformation matrix by", "# The location is within the current route interval covered_distance += math.sqrt(distance_squared) found", "<= angle <= np.pi: return angle if angle < -np.pi: angle += 2", "np.pi: return angle if angle < -np.pi: angle += 2 * np.pi else:", "wp = starting_wp.get_right_lane() while wp is not None: new_location = wp.transform.location new_distance_squared =", "\"\"\" todo unfinished Get kinetics of ego vehicle. todo use a class to", "route intervals curr_wp = wmap.get_waypoint(position) prev_wp = wmap.get_waypoint(prev_position) wp = wmap.get_waypoint(location) if prev_wp", "< interval_length_squared: # The location could be inside the current route interval, if", "= ((new_location.x - prev_position.x) ** 2) + ( (new_location.y - prev_position.y) ** 2)", "get_vehicle_kinetic(vehicle: carla.Vehicle): \"\"\" todo unfinished Get kinetics of ego vehicle. todo use a", "np.array([[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [_T[2][0], _T[2][1], _T[2][2]]]) return inverse_trans_matrix def vector2array(vector:", "if prev_wp and curr_wp and wp: if wp.road_id == prev_wp.road_id or wp.road_id ==", "numpy as np import math def get_speed(vehicle): \"\"\" Get speed consider only 2D", "of ego vehicle. todo use a class to encapsulate all methods about getting", "= transform.get_inverse_matrix() # transform matrix from Actor system to world system inverse_trans_matrix =", "wp.get_right_lane() if distance_squared < interval_length_squared: # The location could be inside the current", "2D velocity. 
\"\"\" vel = vehicle.get_velocity() return math.sqrt(vel.x ** 2 + vel.y **", "all methods about getting kinetics \"\"\" kinetic_dict = {} transform = vehicle.get_transform() vehicle.get_acceleration()", "target_vel = np.array([[speed], [0.], [0.]]) # target velocity in world coordinate system target_vel_world", "return angle if angle < -np.pi: angle += 2 * np.pi else: angle", "of the lane ids if (np.sign(wp.lane_id) == np.sign(prev_wp.lane_id) or np.sign(wp.lane_id) == np.sign(curr_wp.lane_id)): #", "target_velocity = carla.Vector3D( x=target_vel_world[0], y=target_vel_world[1], z=target_vel_world[2], ) # vehicle.set_target_velocity(target_velocity) def angle_reg(angle): \"\"\" Regularize", "the corresponding wp as location target_location_from_wp = wmap.get_waypoint(target_location).transform.location for trans, _ in route:", "a transform class. Inverse transform refers to from world coord system to actor", "return trans_matrix def get_inverse_transform_matrix(transform: carla.Transform): \"\"\" Get inverse transform matrix from a transform", "close distance to correct route interval, otherwise the computation load is too high", "world system trans_matrix = get_transform_matrix(transform) # actor2world # target velocity in local coordinate", "coord system. :param transform: :return trans_matrix: transform matrix in ndarray \"\"\" # original", "Stop calculation if distance_squared < 1.0: break if distance_squared < 400 and not", "to world system trans_matrix = get_transform_matrix(transform) # actor2world # target velocity in local", "class to encapsulate all methods about getting kinetics \"\"\" kinetic_dict = {} transform", "vector coincide vehicle x-axis. :param:speed in m/s \"\"\" # set a initial speed", "to a target speed. Velocity vector coincide vehicle x-axis. :param:speed in m/s \"\"\"", "a neighbor lane is closer to the route # Do this only in", "Do this only in a close distance to correct route interval, otherwise the", "system to world system inverse_trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [_T[2][0],", "np.pi else: angle -= 2 * np.pi def get_transform_matrix(transform: carla.Transform): \"\"\" Get and", "+ ((prev_position.y - position.y) ** 2) distance_squared = ((location.x - prev_position.x) ** 2)", "is not None: new_location = wp.transform.location new_distance_squared = ((new_location.x - prev_position.x) ** 2)", "along the route Note: If the location is not along the route, the", "True: if -np.pi <= angle <= np.pi: return angle if angle < -np.pi:", "import numpy as np import math def get_speed(vehicle): \"\"\" Get speed consider only", ":param transform: :return trans_matrix: transform matrix in ndarray \"\"\" # original trans matrix", "to the current position? Stop calculation if distance_squared < 1.0: break if distance_squared", "vehicle x-axis. :param:speed in m/s \"\"\" # set a initial speed for ego", "2) + ( (new_location.y - prev_position.y) ** 2) if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id): break", "by transform. Matrix is from Actor coord system to the world coord system.", "Actor system to world system inverse_trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]],", "- prev_position.y) ** 2) # Close to the current position? Stop calculation if", "the route # Do this only in a close distance to correct route", "speed consider only 2D velocity. 
\"\"\" vel = vehicle.get_velocity() return math.sqrt(vel.x ** 2", "An alternative is to compare orientations, however, this also does not work for", "- prev_position.y) ** 2) if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id): break if new_distance_squared < distance_squared:", "angle += 2 * np.pi else: angle -= 2 * np.pi def get_transform_matrix(transform:", "def get_transform_matrix(transform: carla.Transform): \"\"\" Get and parse a transformation matrix by transform. Matrix", "+ vel.y ** 2) # + vel.z ** 2) def set_vehicle_speed(vehicle, speed: float):", "for the first route point if not prev_position: prev_position = position continue #", "new_location = wp.transform.location new_distance_squared = ((new_location.x - prev_position.x) ** 2) + ( (new_location.y", "prev_position.y) ** 2) if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id): break if new_distance_squared < distance_squared: distance_squared", "# + vel.z ** 2) def set_vehicle_speed(vehicle, speed: float): \"\"\" Set vehicle to", "matrix in list _T = transform.get_matrix() # transform matrix from Actor system to", "certain bound. default range is [-pi, pi] \"\"\" while True: if -np.pi <=", "match # Note: This assumes a sufficiently small route interval # An alternative", "between previous and current route point interval_length_squared = ((prev_position.x - position.x) ** 2)", "= np.dot(trans_matrix, target_vel) target_vel_world = np.squeeze(target_vel_world) # in carla.Vector3D target_velocity = carla.Vector3D( x=target_vel_world[0],", "<= np.pi: return angle if angle < -np.pi: angle += 2 * np.pi", "is not along the route, the route length will be returned :param wmap:", "is to compare orientations, however, this also does not work for # long", "position? Stop calculation if distance_squared < 1.0: break if distance_squared < 400 and", "to world system inverse_trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [_T[2][0], _T[2][1],", "new_distance_squared location = new_location else: break wp = wp.get_right_lane() if distance_squared < interval_length_squared:", "get_transform_matrix(transform: carla.Transform): \"\"\" Get and parse a transformation matrix by transform. Matrix is", "# The location could be inside the current route interval, if route/lane ids", "found = True break covered_distance += math.sqrt(interval_length_squared) prev_position = position return covered_distance, found", "carla import numpy as np import math def get_speed(vehicle): \"\"\" Get speed consider", "transform matrix in ndarray \"\"\" # original trans matrix in list _T =", "[0.], [0.]]) # target velocity in world coordinate system target_vel_world = np.dot(trans_matrix, target_vel)", "array def get_vehicle_kinetic(vehicle: carla.Vehicle): \"\"\" todo unfinished Get kinetics of ego vehicle. todo", "np import math def get_speed(vehicle): \"\"\" Get speed consider only 2D velocity. \"\"\"", "inverse_trans_matrix def vector2array(vector: carla.Vector3D): \"\"\" Transform carla.Vector3D instance to ndarray \"\"\" array =", "speed. Velocity vector coincide vehicle x-axis. :param:speed in m/s \"\"\" # set a", "as location target_location_from_wp = wmap.get_waypoint(target_location).transform.location for trans, _ in route: # input route", "wmap.get_waypoint(position) prev_wp = wmap.get_waypoint(prev_position) wp = wmap.get_waypoint(location) if prev_wp and curr_wp and wp:", "Get speed consider only 2D velocity. 
\"\"\" vel = vehicle.get_velocity() return math.sqrt(vel.x **", "_T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [_T[2][0], _T[2][1], _T[2][2]]]) return trans_matrix def get_inverse_transform_matrix(transform: carla.Transform): \"\"\"", "vehicle.get_acceleration() vehicle.get_angular_velocity() def get_distance_along_route(wmap, route, target_location): \"\"\" Calculate the distance of the given", "# set a initial speed for ego vehicle transform = vehicle.get_transform() # transform", "# transform matrix from Actor system to world system trans_matrix = np.array([[_T[0][0], _T[0][1],", "and wp: if wp.road_id == prev_wp.road_id or wp.road_id == curr_wp.road_id: # Roads match,", "if angle < -np.pi: angle += 2 * np.pi else: angle -= 2", "if not prev_position: prev_position = position continue # Calculate distance between previous and", "from Actor system to world system trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1],", "= ((prev_position.x - position.x) ** 2) + ((prev_position.y - position.y) ** 2) distance_squared", "_ in route: # input route is transform position = trans.location location =", "# transform matrix from Actor system to world system inverse_trans_matrix = np.array([[_T[0][0], _T[0][1],", "def get_vehicle_kinetic(vehicle: carla.Vehicle): \"\"\" todo unfinished Get kinetics of ego vehicle. todo use", "else: break wp = wp.get_left_lane() wp = starting_wp.get_right_lane() while wp is not None:", "\"\"\" import carla import numpy as np import math def get_speed(vehicle): \"\"\" Get", "vehicle.get_transform() vehicle.get_acceleration() vehicle.get_angular_velocity() def get_distance_along_route(wmap, route, target_location): \"\"\" Calculate the distance of the", "route # Do this only in a close distance to correct route interval,", "wmap.get_waypoint(location) wp = starting_wp.get_left_lane() while wp is not None: new_location = wp.transform.location new_distance_squared", "wmap: carla.Map of current world :param route: list of tuples, (carla.Transform, RoadOption) :param", "+= math.sqrt(distance_squared) found = True break covered_distance += math.sqrt(interval_length_squared) prev_position = position return", "= np.array([[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [_T[2][0], _T[2][1], _T[2][2]]]) return trans_matrix def", "# actor2world # target velocity in local coordinate system, in m/s target_vel =", "velocity in world coordinate system target_vel_world = np.dot(trans_matrix, target_vel) target_vel_world = np.squeeze(target_vel_world) #", "= new_location else: break wp = wp.get_left_lane() wp = starting_wp.get_right_lane() while wp is", "route is transform position = trans.location location = target_location_from_wp # Don't perform any", "for trans, _ in route: # input route is transform position = trans.location", "2 * np.pi else: angle -= 2 * np.pi def get_transform_matrix(transform: carla.Transform): \"\"\"", "prev_position = None found = False # Don't use the input location, use", "prev_position.x) ** 2) + ( (new_location.y - prev_position.y) ** 2) if np.sign(starting_wp.lane_id) !=", "vector2array(vector: carla.Vector3D): \"\"\" Transform carla.Vector3D instance to ndarray \"\"\" array = np.array([vector.x, vector.y," ]
[ "identifiers on which this node depends. :param application_dependency_node_ids: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary.", "__repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return", "= gav @property def node_id(self): \"\"\" **[Required]** Gets the node_id of this ApplicationDependencyVulnerabilitySummary.", "**[Required]** Gets the node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application Dependency", "arguments. The following keyword arguments are supported (corresponding to the getters/setters of this", "License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown", "the Application Dependency. :return: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :rtype: list[oci.adm.models.Vulnerability] \"\"\" return", "of this ApplicationDependencyVulnerabilitySummary. :type: list[oci.adm.models.Vulnerability] \"\"\" self._vulnerabilities = vulnerabilities @property def is_found_in_knowledge_base(self): \"\"\"", "'str', 'application_dependency_node_ids': 'list[str]', 'vulnerabilities': 'list[Vulnerability]', 'is_found_in_knowledge_base': 'bool' } self.attribute_map = { 'gav': 'gav',", "ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the Application Dependency. :return: The vulnerabilities of this", "list[str] \"\"\" self._application_dependency_node_ids = application_dependency_node_ids @property def vulnerabilities(self): \"\"\" **[Required]** Gets the vulnerabilities", "= application_dependency_node_ids @property def vulnerabilities(self): \"\"\" **[Required]** Gets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary.", "this ApplicationDependencyVulnerabilitySummary. :rtype: list[str] \"\"\" return self._application_dependency_node_ids @application_dependency_node_ids.setter def application_dependency_node_ids(self, application_dependency_node_ids): \"\"\" Sets", ":type gav: str :param node_id: The value to assign to the node_id property", "as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You", "this element depends on. \"\"\" def __init__(self, **kwargs): \"\"\" Initializes a new ApplicationDependencyVulnerabilitySummary", "node identifiers on which this node depends. :return: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary.", "the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the Application Dependency. :return:", "Sets the node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application Dependency node.", "= node_id @property def application_dependency_node_ids(self): \"\"\" **[Required]** Gets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary.", "<reponame>pabs3/oci-python-sdk<filename>src/oci/adm/models/application_dependency_vulnerability_summary.py # coding: utf-8 # Copyright (c) 2016, 2022, Oracle and/or its affiliates.", "which this node depends. :return: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :rtype: list[str] \"\"\"", "represents a single dependency in our application. An Application Dependency Vulnerability can be", "ApplicationDependencyVulnerabilitySummary. 
:rtype: bool \"\"\" return self._is_found_in_knowledge_base @is_found_in_knowledge_base.setter def is_found_in_knowledge_base(self, is_found_in_knowledge_base): \"\"\" Sets the", "@gav.setter def gav(self, gav): \"\"\" Sets the gav of this ApplicationDependencyVulnerabilitySummary. Unique Group", "ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._gav = gav @property def node_id(self): \"\"\" **[Required]** Gets", "\"\"\" self._node_id = node_id @property def application_dependency_node_ids(self): \"\"\" **[Required]** Gets the application_dependency_node_ids of", "value to assign to the vulnerabilities property of this ApplicationDependencyVulnerabilitySummary. :type vulnerabilities: list[oci.adm.models.Vulnerability]", ":type: bool \"\"\" self._is_found_in_knowledge_base = is_found_in_knowledge_base def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other):", "Application Dependency is uniquely defined by a nodeId and lists eventual dependencies that", "coding: utf-8 # Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights", "(UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at", "vulnerabilities(self, vulnerabilities): \"\"\" Sets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for", "\"\"\" self._vulnerabilities = vulnerabilities @property def is_found_in_knowledge_base(self): \"\"\" **[Required]** Gets the is_found_in_knowledge_base of", "this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the Application Dependency. :param vulnerabilities: The vulnerabilities", "other is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return", "self._node_id = node_id @property def application_dependency_node_ids(self): \"\"\" **[Required]** Gets the application_dependency_node_ids of this", "the application_dependency_node_ids property of this ApplicationDependencyVulnerabilitySummary. :type application_dependency_node_ids: list[str] :param vulnerabilities: The value", "The value to assign to the gav property of this ApplicationDependencyVulnerabilitySummary. :type gav:", "The node_id of this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._node_id = node_id @property def", "ApplicationDependencyVulnerabilitySummary. :rtype: list[oci.adm.models.Vulnerability] \"\"\" return self._vulnerabilities @vulnerabilities.setter def vulnerabilities(self, vulnerabilities): \"\"\" Sets the", "base. :param is_found_in_knowledge_base: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :type: bool \"\"\" self._is_found_in_knowledge_base =", "and lists eventual dependencies that this element depends on. \"\"\" def __init__(self, **kwargs):", "\"\"\" return self._is_found_in_knowledge_base @is_found_in_knowledge_base.setter def is_found_in_knowledge_base(self, is_found_in_knowledge_base): \"\"\" Sets the is_found_in_knowledge_base of this", "Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from", "eventual Vulnerabilities. Each Application Dependency is uniquely defined by a nodeId and lists", "Group Artifact Version (GAV) identifier (Group:Artifact:Version). :return: The gav of this ApplicationDependencyVulnerabilitySummary. :rtype:", ":rtype: str \"\"\" return self._node_id @node_id.setter def node_id(self, node_id): \"\"\" Sets the node_id", "dependencies that this element depends on. 
\"\"\" def __init__(self, **kwargs): \"\"\" Initializes a", "of an Application Dependency node. :param node_id: The node_id of this ApplicationDependencyVulnerabilitySummary. :type:", "ApplicationDependencyVulnerabilitySummary. Indicates if the artifact is found in the knowledge base. :param is_found_in_knowledge_base:", "for the Application Dependency. :return: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :rtype: list[oci.adm.models.Vulnerability] \"\"\"", "with eventual Vulnerabilities. Each Application Dependency is uniquely defined by a nodeId and", "The value to assign to the vulnerabilities property of this ApplicationDependencyVulnerabilitySummary. :type vulnerabilities:", "bool \"\"\" self.swagger_types = { 'gav': 'str', 'node_id': 'str', 'application_dependency_node_ids': 'list[str]', 'vulnerabilities': 'list[Vulnerability]',", "2022, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed", "knowledge base. :param is_found_in_knowledge_base: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :type: bool \"\"\" self._is_found_in_knowledge_base", "The value to assign to the application_dependency_node_ids property of this ApplicationDependencyVulnerabilitySummary. :type application_dependency_node_ids:", "node identifiers on which this node depends. :param application_dependency_node_ids: The application_dependency_node_ids of this", "__init__(self, **kwargs): \"\"\" Initializes a new ApplicationDependencyVulnerabilitySummary object with values from keyword arguments.", "\"\"\" **[Required]** Gets the gav of this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV)", "eventual dependencies that this element depends on. \"\"\" def __init__(self, **kwargs): \"\"\" Initializes", "def gav(self, gav): \"\"\" Sets the gav of this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact", "{ 'gav': 'gav', 'node_id': 'nodeId', 'application_dependency_node_ids': 'applicationDependencyNodeIds', 'vulnerabilities': 'vulnerabilities', 'is_found_in_knowledge_base': 'isFoundInKnowledgeBase' } self._gav", "Dependency Vulnerability represents a single dependency in our application. An Application Dependency Vulnerability", "List of vulnerabilities for the Application Dependency. :return: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary.", "def node_id(self, node_id): \"\"\" Sets the node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier of", "its affiliates. All rights reserved. # This software is dual-licensed to you under", "class): :param gav: The value to assign to the gav property of this", "this ApplicationDependencyVulnerabilitySummary. :type: list[oci.adm.models.Vulnerability] \"\"\" self._vulnerabilities = vulnerabilities @property def is_found_in_knowledge_base(self): \"\"\" **[Required]**", "of this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version). :return: The gav", "to assign to the vulnerabilities property of this ApplicationDependencyVulnerabilitySummary. :type vulnerabilities: list[oci.adm.models.Vulnerability] :param", "lists eventual dependencies that this element depends on. \"\"\" def __init__(self, **kwargs): \"\"\"", "ApplicationDependencyVulnerabilitySummary. Indicates if the artifact is found in the knowledge base. :return: The", "gav: The gav of this ApplicationDependencyVulnerabilitySummary. 
:type: str \"\"\" self._gav = gav @property", "vulnerabilities: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :type: list[oci.adm.models.Vulnerability] \"\"\" self._vulnerabilities = vulnerabilities @property", "of this ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application Dependency node. :param node_id: The", ":return: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :rtype: list[oci.adm.models.Vulnerability] \"\"\" return self._vulnerabilities @vulnerabilities.setter def", "= is_found_in_knowledge_base def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None:", "\"\"\" return self._vulnerabilities @vulnerabilities.setter def vulnerabilities(self, vulnerabilities): \"\"\" Sets the vulnerabilities of this", "ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application Dependency node. :return: The node_id of this", "under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache", "'isFoundInKnowledgeBase' } self._gav = None self._node_id = None self._application_dependency_node_ids = None self._vulnerabilities =", "of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the Application Dependency. :param vulnerabilities: The", "ApplicationDependencyVulnerabilitySummary. :type: bool \"\"\" self._is_found_in_knowledge_base = is_found_in_knowledge_base def __repr__(self): return formatted_flat_dict(self) def __eq__(self,", "= None self._is_found_in_knowledge_base = None @property def gav(self): \"\"\" **[Required]** Gets the gav", ":return: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :rtype: bool \"\"\" return self._is_found_in_knowledge_base @is_found_in_knowledge_base.setter def", "vulnerabilities for the Application Dependency. :param vulnerabilities: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :type:", "noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class ApplicationDependencyVulnerabilitySummary(object): \"\"\" An Application Dependency", "bool \"\"\" self._is_found_in_knowledge_base = is_found_in_knowledge_base def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if", "self._application_dependency_node_ids = None self._vulnerabilities = None self._is_found_in_knowledge_base = None @property def gav(self): \"\"\"", "Artifact Version (GAV) identifier (Group:Artifact:Version). :param gav: The gav of this ApplicationDependencyVulnerabilitySummary. :type:", "ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\" return self._gav @gav.setter def gav(self, gav): \"\"\" Sets the", "can be associated with eventual Vulnerabilities. Each Application Dependency is uniquely defined by", "@property def node_id(self): \"\"\" **[Required]** Gets the node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier", "oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class ApplicationDependencyVulnerabilitySummary(object): \"\"\" An Application Dependency Vulnerability represents a", "values from keyword arguments. The following keyword arguments are supported (corresponding to the", "is_found_in_knowledge_base property of this ApplicationDependencyVulnerabilitySummary. :type is_found_in_knowledge_base: bool \"\"\" self.swagger_types = { 'gav':", "# coding: utf-8 # Copyright (c) 2016, 2022, Oracle and/or its affiliates. 
All", "ApplicationDependencyVulnerabilitySummary. :type: list[str] \"\"\" self._application_dependency_node_ids = application_dependency_node_ids @property def vulnerabilities(self): \"\"\" **[Required]** Gets", ":param node_id: The node_id of this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._node_id = node_id", ":type is_found_in_knowledge_base: bool \"\"\" self.swagger_types = { 'gav': 'str', 'node_id': 'str', 'application_dependency_node_ids': 'list[str]',", "to assign to the application_dependency_node_ids property of this ApplicationDependencyVulnerabilitySummary. :type application_dependency_node_ids: list[str] :param", "to assign to the is_found_in_knowledge_base property of this ApplicationDependencyVulnerabilitySummary. :type is_found_in_knowledge_base: bool \"\"\"", "Dependency. :return: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :rtype: list[oci.adm.models.Vulnerability] \"\"\" return self._vulnerabilities @vulnerabilities.setter", "this ApplicationDependencyVulnerabilitySummary. :type application_dependency_node_ids: list[str] :param vulnerabilities: The value to assign to the", ":type: list[str] \"\"\" self._application_dependency_node_ids = application_dependency_node_ids @property def vulnerabilities(self): \"\"\" **[Required]** Gets the", "vulnerabilities @property def is_found_in_knowledge_base(self): \"\"\" **[Required]** Gets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. Indicates", "\"\"\" def __init__(self, **kwargs): \"\"\" Initializes a new ApplicationDependencyVulnerabilitySummary object with values from", "'list[Vulnerability]', 'is_found_in_knowledge_base': 'bool' } self.attribute_map = { 'gav': 'gav', 'node_id': 'nodeId', 'application_dependency_node_ids': 'applicationDependencyNodeIds',", "which this node depends. :param application_dependency_node_ids: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :type: list[str]", "this ApplicationDependencyVulnerabilitySummary. List of (Application Dependencies) node identifiers on which this node depends.", "The value to assign to the node_id property of this ApplicationDependencyVulnerabilitySummary. :type node_id:", "Application Dependency. :return: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :rtype: list[oci.adm.models.Vulnerability] \"\"\" return self._vulnerabilities", "on. \"\"\" def __init__(self, **kwargs): \"\"\" Initializes a new ApplicationDependencyVulnerabilitySummary object with values", "'node_id': 'str', 'application_dependency_node_ids': 'list[str]', 'vulnerabilities': 'list[Vulnerability]', 'is_found_in_knowledge_base': 'bool' } self.attribute_map = { 'gav':", "utf-8 # Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.", "you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or", "value to assign to the gav property of this ApplicationDependencyVulnerabilitySummary. :type gav: str", "this ApplicationDependencyVulnerabilitySummary. :type node_id: str :param application_dependency_node_ids: The value to assign to the", "Artifact Version (GAV) identifier (Group:Artifact:Version). :return: The gav of this ApplicationDependencyVulnerabilitySummary. :rtype: str", "application_dependency_node_ids): \"\"\" Sets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List of (Application Dependencies) node", "ApplicationDependencyVulnerabilitySummary. 
:rtype: list[str] \"\"\" return self._application_dependency_node_ids @application_dependency_node_ids.setter def application_dependency_node_ids(self, application_dependency_node_ids): \"\"\" Sets the", "} self.attribute_map = { 'gav': 'gav', 'node_id': 'nodeId', 'application_dependency_node_ids': 'applicationDependencyNodeIds', 'vulnerabilities': 'vulnerabilities', 'is_found_in_knowledge_base':", "An Application Dependency Vulnerability can be associated with eventual Vulnerabilities. Each Application Dependency", "return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return self.__dict__", "vulnerabilities: list[oci.adm.models.Vulnerability] :param is_found_in_knowledge_base: The value to assign to the is_found_in_knowledge_base property of", "value to assign to the is_found_in_knowledge_base property of this ApplicationDependencyVulnerabilitySummary. :type is_found_in_knowledge_base: bool", "object with values from keyword arguments. The following keyword arguments are supported (corresponding", "on which this node depends. :param application_dependency_node_ids: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :type:", "vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :type: list[oci.adm.models.Vulnerability] \"\"\" self._vulnerabilities = vulnerabilities @property def is_found_in_knowledge_base(self):", "Indicates if the artifact is found in the knowledge base. :param is_found_in_knowledge_base: The", "the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. Indicates if the artifact is found in the", "(corresponding to the getters/setters of this class): :param gav: The value to assign", ":param application_dependency_node_ids: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :type: list[str] \"\"\" self._application_dependency_node_ids = application_dependency_node_ids", "node_id of this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._node_id = node_id @property def application_dependency_node_ids(self):", "'vulnerabilities': 'vulnerabilities', 'is_found_in_knowledge_base': 'isFoundInKnowledgeBase' } self._gav = None self._node_id = None self._application_dependency_node_ids =", "str \"\"\" self._node_id = node_id @property def application_dependency_node_ids(self): \"\"\" **[Required]** Gets the application_dependency_node_ids", "of vulnerabilities for the Application Dependency. :return: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :rtype:", "in our application. An Application Dependency Vulnerability can be associated with eventual Vulnerabilities.", "# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. #", "return self._vulnerabilities @vulnerabilities.setter def vulnerabilities(self, vulnerabilities): \"\"\" Sets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary.", "gav of this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._gav = gav @property def node_id(self):", "application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :type: list[str] \"\"\" self._application_dependency_node_ids = application_dependency_node_ids @property def vulnerabilities(self):", "None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self", "ApplicationDependencyVulnerabilitySummary. 
:rtype: str \"\"\" return self._node_id @node_id.setter def node_id(self, node_id): \"\"\" Sets the", "\"\"\" Sets the gav of this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV) identifier", "formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class ApplicationDependencyVulnerabilitySummary(object):", "of (Application Dependencies) node identifiers on which this node depends. :param application_dependency_node_ids: The", "depends on. \"\"\" def __init__(self, **kwargs): \"\"\" Initializes a new ApplicationDependencyVulnerabilitySummary object with", "application_dependency_node_ids @property def vulnerabilities(self): \"\"\" **[Required]** Gets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List", "Application Dependency Vulnerability can be associated with eventual Vulnerabilities. Each Application Dependency is", "init_model_state_from_kwargs @init_model_state_from_kwargs class ApplicationDependencyVulnerabilitySummary(object): \"\"\" An Application Dependency Vulnerability represents a single dependency", "this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\" return self._gav @gav.setter def gav(self, gav): \"\"\" Sets", "Sets the gav of this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version).", "is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :rtype: bool \"\"\" return self._is_found_in_knowledge_base @is_found_in_knowledge_base.setter def is_found_in_knowledge_base(self, is_found_in_knowledge_base):", "import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class", "bool \"\"\" return self._is_found_in_knowledge_base @is_found_in_knowledge_base.setter def is_found_in_knowledge_base(self, is_found_in_knowledge_base): \"\"\" Sets the is_found_in_knowledge_base of", "= None self._vulnerabilities = None self._is_found_in_knowledge_base = None @property def gav(self): \"\"\" **[Required]**", "application_dependency_node_ids(self, application_dependency_node_ids): \"\"\" Sets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List of (Application Dependencies)", "the knowledge base. :param is_found_in_knowledge_base: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :type: bool \"\"\"", "the is_found_in_knowledge_base property of this ApplicationDependencyVulnerabilitySummary. :type is_found_in_knowledge_base: bool \"\"\" self.swagger_types = {", ":rtype: str \"\"\" return self._gav @gav.setter def gav(self, gav): \"\"\" Sets the gav", "Group Artifact Version (GAV) identifier (Group:Artifact:Version). :param gav: The gav of this ApplicationDependencyVulnerabilitySummary.", "vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the Application Dependency. :return: The", "(Application Dependencies) node identifiers on which this node depends. :param application_dependency_node_ids: The application_dependency_node_ids", "this ApplicationDependencyVulnerabilitySummary. Indicates if the artifact is found in the knowledge base. 
:return:", "application_dependency_node_ids: list[str] :param vulnerabilities: The value to assign to the vulnerabilities property of", "of this ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application Dependency node. :return: The node_id", "of this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._gav = gav @property def node_id(self): \"\"\"", "The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :type: bool \"\"\" self._is_found_in_knowledge_base = is_found_in_knowledge_base def __repr__(self):", ":param vulnerabilities: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :type: list[oci.adm.models.Vulnerability] \"\"\" self._vulnerabilities = vulnerabilities", "Gets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. Indicates if the artifact is found in", "The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :type: list[str] \"\"\" self._application_dependency_node_ids = application_dependency_node_ids @property def", "of this ApplicationDependencyVulnerabilitySummary. :type application_dependency_node_ids: list[str] :param vulnerabilities: The value to assign to", "application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :rtype: list[str] \"\"\" return self._application_dependency_node_ids @application_dependency_node_ids.setter def application_dependency_node_ids(self, application_dependency_node_ids):", "gav of this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\" return self._gav @gav.setter def gav(self, gav):", "other): if other is None: return False return self.__dict__ == other.__dict__ def __ne__(self,", "the gav property of this ApplicationDependencyVulnerabilitySummary. :type gav: str :param node_id: The value", ":param vulnerabilities: The value to assign to the vulnerabilities property of this ApplicationDependencyVulnerabilitySummary.", "@node_id.setter def node_id(self, node_id): \"\"\" Sets the node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier", "\"\"\" self._is_found_in_knowledge_base = is_found_in_knowledge_base def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other", "this ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application Dependency node. :param node_id: The node_id", "self._node_id @node_id.setter def node_id(self, node_id): \"\"\" Sets the node_id of this ApplicationDependencyVulnerabilitySummary. Unique", "\"\"\" **[Required]** Gets the node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application", "software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as", "property of this ApplicationDependencyVulnerabilitySummary. :type is_found_in_knowledge_base: bool \"\"\" self.swagger_types = { 'gav': 'str',", "ApplicationDependencyVulnerabilitySummary. List of (Application Dependencies) node identifiers on which this node depends. :param", "this node depends. :return: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :rtype: list[str] \"\"\" return", "node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application Dependency node. :return: The", "our application. An Application Dependency Vulnerability can be associated with eventual Vulnerabilities. Each", "identifier (Group:Artifact:Version). :return: The gav of this ApplicationDependencyVulnerabilitySummary. 
:rtype: str \"\"\" return self._gav", "application_dependency_node_ids: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :type: list[str] \"\"\" self._application_dependency_node_ids = application_dependency_node_ids @property", "Dependencies) node identifiers on which this node depends. :return: The application_dependency_node_ids of this", "return self._is_found_in_knowledge_base @is_found_in_knowledge_base.setter def is_found_in_knowledge_base(self, is_found_in_knowledge_base): \"\"\" Sets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary.", "if the artifact is found in the knowledge base. :param is_found_in_knowledge_base: The is_found_in_knowledge_base", "that this element depends on. \"\"\" def __init__(self, **kwargs): \"\"\" Initializes a new", "node_id property of this ApplicationDependencyVulnerabilitySummary. :type node_id: str :param application_dependency_node_ids: The value to", "http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel #", "of this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\" return self._gav @gav.setter def gav(self, gav): \"\"\"", "Application Dependency node. :return: The node_id of this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\" return", ":param node_id: The value to assign to the node_id property of this ApplicationDependencyVulnerabilitySummary.", "of (Application Dependencies) node identifiers on which this node depends. :return: The application_dependency_node_ids", "(GAV) identifier (Group:Artifact:Version). :return: The gav of this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\" return", "2016, 2022, Oracle and/or its affiliates. All rights reserved. # This software is", "self._is_found_in_knowledge_base @is_found_in_knowledge_base.setter def is_found_in_knowledge_base(self, is_found_in_knowledge_base): \"\"\" Sets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. Indicates", "if the artifact is found in the knowledge base. :return: The is_found_in_knowledge_base of", "if other is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other):", "nodeId and lists eventual dependencies that this element depends on. \"\"\" def __init__(self,", "'list[str]', 'vulnerabilities': 'list[Vulnerability]', 'is_found_in_knowledge_base': 'bool' } self.attribute_map = { 'gav': 'gav', 'node_id': 'nodeId',", "this node depends. :param application_dependency_node_ids: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :type: list[str] \"\"\"", ":type application_dependency_node_ids: list[str] :param vulnerabilities: The value to assign to the vulnerabilities property", "ApplicationDependencyVulnerabilitySummary(object): \"\"\" An Application Dependency Vulnerability represents a single dependency in our application.", "node. :param node_id: The node_id of this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._node_id =", "'application_dependency_node_ids': 'list[str]', 'vulnerabilities': 'list[Vulnerability]', 'is_found_in_knowledge_base': 'bool' } self.attribute_map = { 'gav': 'gav', 'node_id':", "ApplicationDependencyVulnerabilitySummary. 
:type vulnerabilities: list[oci.adm.models.Vulnerability] :param is_found_in_knowledge_base: The value to assign to the is_found_in_knowledge_base", "is_found_in_knowledge_base: bool \"\"\" self.swagger_types = { 'gav': 'str', 'node_id': 'str', 'application_dependency_node_ids': 'list[str]', 'vulnerabilities':", "this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version). :return: The gav of", "ApplicationDependencyVulnerabilitySummary object with values from keyword arguments. The following keyword arguments are supported", "node_id: str :param application_dependency_node_ids: The value to assign to the application_dependency_node_ids property of", "vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :rtype: list[oci.adm.models.Vulnerability] \"\"\" return self._vulnerabilities @vulnerabilities.setter def vulnerabilities(self, vulnerabilities):", "may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401", "this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\" return self._node_id @node_id.setter def node_id(self, node_id): \"\"\" Sets", "this ApplicationDependencyVulnerabilitySummary. :type vulnerabilities: list[oci.adm.models.Vulnerability] :param is_found_in_knowledge_base: The value to assign to the", "License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util", "The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :rtype: list[oci.adm.models.Vulnerability] \"\"\" return self._vulnerabilities @vulnerabilities.setter def vulnerabilities(self,", "Application Dependency. :param vulnerabilities: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :type: list[oci.adm.models.Vulnerability] \"\"\" self._vulnerabilities", "is_found_in_knowledge_base(self, is_found_in_knowledge_base): \"\"\" Sets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. Indicates if the artifact", "ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._node_id = node_id @property def application_dependency_node_ids(self): \"\"\" **[Required]** Gets", "'vulnerabilities', 'is_found_in_knowledge_base': 'isFoundInKnowledgeBase' } self._gav = None self._node_id = None self._application_dependency_node_ids = None", "application_dependency_node_ids property of this ApplicationDependencyVulnerabilitySummary. :type application_dependency_node_ids: list[str] :param vulnerabilities: The value to", "depends. :param application_dependency_node_ids: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :type: list[str] \"\"\" self._application_dependency_node_ids =", "Dependency is uniquely defined by a nodeId and lists eventual dependencies that this", "Sets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the Application Dependency.", "gav of this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version). :param gav:", "Sets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. Indicates if the artifact is found in", "def gav(self): \"\"\" **[Required]** Gets the gav of this ApplicationDependencyVulnerabilitySummary. 
Unique Group Artifact", "= None self._application_dependency_node_ids = None self._vulnerabilities = None self._is_found_in_knowledge_base = None @property def", "An Application Dependency Vulnerability represents a single dependency in our application. An Application", "Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version). :param gav: The gav of this", "@property def gav(self): \"\"\" **[Required]** Gets the gav of this ApplicationDependencyVulnerabilitySummary. Unique Group", "property of this ApplicationDependencyVulnerabilitySummary. :type node_id: str :param application_dependency_node_ids: The value to assign", "element depends on. \"\"\" def __init__(self, **kwargs): \"\"\" Initializes a new ApplicationDependencyVulnerabilitySummary object", "supported (corresponding to the getters/setters of this class): :param gav: The value to", "the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License", "Vulnerability can be associated with eventual Vulnerabilities. Each Application Dependency is uniquely defined", "self._node_id = None self._application_dependency_node_ids = None self._vulnerabilities = None self._is_found_in_knowledge_base = None @property", "@vulnerabilities.setter def vulnerabilities(self, vulnerabilities): \"\"\" Sets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of", "\"\"\" self.swagger_types = { 'gav': 'str', 'node_id': 'str', 'application_dependency_node_ids': 'list[str]', 'vulnerabilities': 'list[Vulnerability]', 'is_found_in_knowledge_base':", "\"\"\" self._gav = gav @property def node_id(self): \"\"\" **[Required]** Gets the node_id of", "ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application Dependency node. :param node_id: The node_id of", "to the is_found_in_knowledge_base property of this ApplicationDependencyVulnerabilitySummary. :type is_found_in_knowledge_base: bool \"\"\" self.swagger_types =", "an Application Dependency node. :return: The node_id of this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\"", "this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the Application Dependency. :return: The vulnerabilities of", "Gets the gav of this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version).", "\"\"\" **[Required]** Gets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. Indicates if the artifact is", "this ApplicationDependencyVulnerabilitySummary. :rtype: bool \"\"\" return self._is_found_in_knowledge_base @is_found_in_knowledge_base.setter def is_found_in_knowledge_base(self, is_found_in_knowledge_base): \"\"\" Sets", "\"\"\" Sets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the Application", "\"\"\" Sets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List of (Application Dependencies) node identifiers", "https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either", "ApplicationDependencyVulnerabilitySummary. 
:type application_dependency_node_ids: list[str] :param vulnerabilities: The value to assign to the vulnerabilities", "= None self._node_id = None self._application_dependency_node_ids = None self._vulnerabilities = None self._is_found_in_knowledge_base =", "The value to assign to the is_found_in_knowledge_base property of this ApplicationDependencyVulnerabilitySummary. :type is_found_in_knowledge_base:", "= vulnerabilities @property def is_found_in_knowledge_base(self): \"\"\" **[Required]** Gets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary.", "of this ApplicationDependencyVulnerabilitySummary. :type vulnerabilities: list[oci.adm.models.Vulnerability] :param is_found_in_knowledge_base: The value to assign to", "shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may", "from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters", "gav(self, gav): \"\"\" Sets the gav of this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version", "'nodeId', 'application_dependency_node_ids': 'applicationDependencyNodeIds', 'vulnerabilities': 'vulnerabilities', 'is_found_in_knowledge_base': 'isFoundInKnowledgeBase' } self._gav = None self._node_id =", "property of this ApplicationDependencyVulnerabilitySummary. :type vulnerabilities: list[oci.adm.models.Vulnerability] :param is_found_in_knowledge_base: The value to assign", "NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class ApplicationDependencyVulnerabilitySummary(object): \"\"\"", "be associated with eventual Vulnerabilities. Each Application Dependency is uniquely defined by a", "gav(self): \"\"\" **[Required]** Gets the gav of this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version", "of this ApplicationDependencyVulnerabilitySummary. :type: list[str] \"\"\" self._application_dependency_node_ids = application_dependency_node_ids @property def vulnerabilities(self): \"\"\"", "def vulnerabilities(self): \"\"\" **[Required]** Gets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities", "this ApplicationDependencyVulnerabilitySummary. :type is_found_in_knowledge_base: bool \"\"\" self.swagger_types = { 'gav': 'str', 'node_id': 'str',", "**[Required]** Gets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List of (Application Dependencies) node identifiers", "def __eq__(self, other): if other is None: return False return self.__dict__ == other.__dict__", "to the application_dependency_node_ids property of this ApplicationDependencyVulnerabilitySummary. :type application_dependency_node_ids: list[str] :param vulnerabilities: The", "gav: str :param node_id: The value to assign to the node_id property of", "node_id(self): \"\"\" **[Required]** Gets the node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier of an", "this ApplicationDependencyVulnerabilitySummary. :type: bool \"\"\" self._is_found_in_knowledge_base = is_found_in_knowledge_base def __repr__(self): return formatted_flat_dict(self) def", "for the Application Dependency. :param vulnerabilities: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. 
:type: list[oci.adm.models.Vulnerability]", "\"\"\" return self._application_dependency_node_ids @application_dependency_node_ids.setter def application_dependency_node_ids(self, application_dependency_node_ids): \"\"\" Sets the application_dependency_node_ids of this", "vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the Application Dependency. :param vulnerabilities:", "self.attribute_map = { 'gav': 'gav', 'node_id': 'nodeId', 'application_dependency_node_ids': 'applicationDependencyNodeIds', 'vulnerabilities': 'vulnerabilities', 'is_found_in_knowledge_base': 'isFoundInKnowledgeBase'", "'is_found_in_knowledge_base': 'bool' } self.attribute_map = { 'gav': 'gav', 'node_id': 'nodeId', 'application_dependency_node_ids': 'applicationDependencyNodeIds', 'vulnerabilities':", "Unique identifier of an Application Dependency node. :param node_id: The node_id of this", "self._vulnerabilities @vulnerabilities.setter def vulnerabilities(self, vulnerabilities): \"\"\" Sets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List", "list[str] \"\"\" return self._application_dependency_node_ids @application_dependency_node_ids.setter def application_dependency_node_ids(self, application_dependency_node_ids): \"\"\" Sets the application_dependency_node_ids of", "__eq__(self, other): if other is None: return False return self.__dict__ == other.__dict__ def", "of this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._node_id = node_id @property def application_dependency_node_ids(self): \"\"\"", "depends. :return: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :rtype: list[str] \"\"\" return self._application_dependency_node_ids @application_dependency_node_ids.setter", "from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs", "and/or its affiliates. All rights reserved. # This software is dual-licensed to you", "from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class ApplicationDependencyVulnerabilitySummary(object): \"\"\" An Application Dependency Vulnerability represents", "node_id): \"\"\" Sets the node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application", "arguments are supported (corresponding to the getters/setters of this class): :param gav: The", "\"\"\" An Application Dependency Vulnerability represents a single dependency in our application. An", "The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :rtype: bool \"\"\" return self._is_found_in_knowledge_base @is_found_in_knowledge_base.setter def is_found_in_knowledge_base(self,", "def application_dependency_node_ids(self): \"\"\" **[Required]** Gets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List of (Application", "identifier of an Application Dependency node. :return: The node_id of this ApplicationDependencyVulnerabilitySummary. :rtype:", "node. :return: The node_id of this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\" return self._node_id @node_id.setter", "@property def vulnerabilities(self): \"\"\" **[Required]** Gets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of", "of this ApplicationDependencyVulnerabilitySummary. 
:rtype: list[str] \"\"\" return self._application_dependency_node_ids @application_dependency_node_ids.setter def application_dependency_node_ids(self, application_dependency_node_ids): \"\"\"", "node_id: The node_id of this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._node_id = node_id @property", "Version (GAV) identifier (Group:Artifact:Version). :return: The gav of this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\"", "Vulnerabilities. Each Application Dependency is uniquely defined by a nodeId and lists eventual", "List of (Application Dependencies) node identifiers on which this node depends. :param application_dependency_node_ids:", "The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :rtype: list[str] \"\"\" return self._application_dependency_node_ids @application_dependency_node_ids.setter def application_dependency_node_ids(self,", "vulnerabilities for the Application Dependency. :return: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :rtype: list[oci.adm.models.Vulnerability]", "self._application_dependency_node_ids = application_dependency_node_ids @property def vulnerabilities(self): \"\"\" **[Required]** Gets the vulnerabilities of this", "this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version). :param gav: The gav", "of this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version). :param gav: The", "\"\"\" return self._gav @gav.setter def gav(self, gav): \"\"\" Sets the gav of this", ":param is_found_in_knowledge_base: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :type: bool \"\"\" self._is_found_in_knowledge_base = is_found_in_knowledge_base", "Gets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List of (Application Dependencies) node identifiers on", "is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown", "self._application_dependency_node_ids @application_dependency_node_ids.setter def application_dependency_node_ids(self, application_dependency_node_ids): \"\"\" Sets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List", "vulnerabilities property of this ApplicationDependencyVulnerabilitySummary. :type vulnerabilities: list[oci.adm.models.Vulnerability] :param is_found_in_knowledge_base: The value to", "the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the Application Dependency. :param", "is_found_in_knowledge_base: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :type: bool \"\"\" self._is_found_in_knowledge_base = is_found_in_knowledge_base def", "self._is_found_in_knowledge_base = None @property def gav(self): \"\"\" **[Required]** Gets the gav of this", "Dependency Vulnerability can be associated with eventual Vulnerabilities. Each Application Dependency is uniquely", "You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa:", ":type vulnerabilities: list[oci.adm.models.Vulnerability] :param is_found_in_knowledge_base: The value to assign to the is_found_in_knowledge_base property", "defined by a nodeId and lists eventual dependencies that this element depends on.", "application. 
An Application Dependency Vulnerability can be associated with eventual Vulnerabilities. Each Application", "this ApplicationDependencyVulnerabilitySummary. :rtype: list[oci.adm.models.Vulnerability] \"\"\" return self._vulnerabilities @vulnerabilities.setter def vulnerabilities(self, vulnerabilities): \"\"\" Sets", "artifact is found in the knowledge base. :param is_found_in_knowledge_base: The is_found_in_knowledge_base of this", "identifier (Group:Artifact:Version). :param gav: The gav of this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._gav", "The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :type: list[oci.adm.models.Vulnerability] \"\"\" self._vulnerabilities = vulnerabilities @property def", "return self._application_dependency_node_ids @application_dependency_node_ids.setter def application_dependency_node_ids(self, application_dependency_node_ids): \"\"\" Sets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary.", "of this ApplicationDependencyVulnerabilitySummary. :type node_id: str :param application_dependency_node_ids: The value to assign to", "at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose", "ApplicationDependencyVulnerabilitySummary. List of (Application Dependencies) node identifiers on which this node depends. :return:", ":rtype: bool \"\"\" return self._is_found_in_knowledge_base @is_found_in_knowledge_base.setter def is_found_in_knowledge_base(self, is_found_in_knowledge_base): \"\"\" Sets the is_found_in_knowledge_base", "This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0", "def vulnerabilities(self, vulnerabilities): \"\"\" Sets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities", ":param is_found_in_knowledge_base: The value to assign to the is_found_in_knowledge_base property of this ApplicationDependencyVulnerabilitySummary.", "is found in the knowledge base. :return: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :rtype:", "a new ApplicationDependencyVulnerabilitySummary object with values from keyword arguments. The following keyword arguments", "of this ApplicationDependencyVulnerabilitySummary. Indicates if the artifact is found in the knowledge base.", "Application Dependency node. :param node_id: The node_id of this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\"", "single dependency in our application. An Application Dependency Vulnerability can be associated with", "Sets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List of (Application Dependencies) node identifiers on", "\"\"\" **[Required]** Gets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the", "associated with eventual Vulnerabilities. Each Application Dependency is uniquely defined by a nodeId", "The gav of this ApplicationDependencyVulnerabilitySummary. 
:rtype: str \"\"\" return self._gav @gav.setter def gav(self,", "Each Application Dependency is uniquely defined by a nodeId and lists eventual dependencies", "import init_model_state_from_kwargs @init_model_state_from_kwargs class ApplicationDependencyVulnerabilitySummary(object): \"\"\" An Application Dependency Vulnerability represents a single", "@application_dependency_node_ids.setter def application_dependency_node_ids(self, application_dependency_node_ids): \"\"\" Sets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List of", "node depends. :param application_dependency_node_ids: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :type: list[str] \"\"\" self._application_dependency_node_ids", "def __init__(self, **kwargs): \"\"\" Initializes a new ApplicationDependencyVulnerabilitySummary object with values from keyword", "are supported (corresponding to the getters/setters of this class): :param gav: The value", "self.swagger_types = { 'gav': 'str', 'node_id': 'str', 'application_dependency_node_ids': 'list[str]', 'vulnerabilities': 'list[Vulnerability]', 'is_found_in_knowledge_base': 'bool'", "license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import", "uniquely defined by a nodeId and lists eventual dependencies that this element depends", "'node_id': 'nodeId', 'application_dependency_node_ids': 'applicationDependencyNodeIds', 'vulnerabilities': 'vulnerabilities', 'is_found_in_knowledge_base': 'isFoundInKnowledgeBase' } self._gav = None self._node_id", "this ApplicationDependencyVulnerabilitySummary. :type gav: str :param node_id: The value to assign to the", "def is_found_in_knowledge_base(self, is_found_in_knowledge_base): \"\"\" Sets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. Indicates if the", "this ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application Dependency node. :return: The node_id of", "reserved. # This software is dual-licensed to you under the Universal Permissive License", "Application Dependency Vulnerability represents a single dependency in our application. An Application Dependency", "a single dependency in our application. An Application Dependency Vulnerability can be associated", "list[str] :param vulnerabilities: The value to assign to the vulnerabilities property of this", "by a nodeId and lists eventual dependencies that this element depends on. \"\"\"", "or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.", "node_id @property def application_dependency_node_ids(self): \"\"\" **[Required]** Gets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List", "the node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application Dependency node. :return:", "new ApplicationDependencyVulnerabilitySummary object with values from keyword arguments. The following keyword arguments are", "'vulnerabilities': 'list[Vulnerability]', 'is_found_in_knowledge_base': 'bool' } self.attribute_map = { 'gav': 'gav', 'node_id': 'nodeId', 'application_dependency_node_ids':", "Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version). :return: The gav of this ApplicationDependencyVulnerabilitySummary.", "Initializes a new ApplicationDependencyVulnerabilitySummary object with values from keyword arguments. 
The following keyword", "Indicates if the artifact is found in the knowledge base. :return: The is_found_in_knowledge_base", "Dependencies) node identifiers on which this node depends. :param application_dependency_node_ids: The application_dependency_node_ids of", "node_id: The value to assign to the node_id property of this ApplicationDependencyVulnerabilitySummary. :type", "**[Required]** Gets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the Application", "(Group:Artifact:Version). :param gav: The gav of this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._gav =", "Dependency. :param vulnerabilities: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :type: list[oci.adm.models.Vulnerability] \"\"\" self._vulnerabilities =", ":type: list[oci.adm.models.Vulnerability] \"\"\" self._vulnerabilities = vulnerabilities @property def is_found_in_knowledge_base(self): \"\"\" **[Required]** Gets the", "to the getters/setters of this class): :param gav: The value to assign to", "\"\"\" **[Required]** Gets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List of (Application Dependencies) node", "in the knowledge base. :return: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :rtype: bool \"\"\"", "base. :return: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :rtype: bool \"\"\" return self._is_found_in_knowledge_base @is_found_in_knowledge_base.setter", "property of this ApplicationDependencyVulnerabilitySummary. :type application_dependency_node_ids: list[str] :param vulnerabilities: The value to assign", "'gav': 'str', 'node_id': 'str', 'application_dependency_node_ids': 'list[str]', 'vulnerabilities': 'list[Vulnerability]', 'is_found_in_knowledge_base': 'bool' } self.attribute_map =", "Unique identifier of an Application Dependency node. :return: The node_id of this ApplicationDependencyVulnerabilitySummary.", "None self._is_found_in_knowledge_base = None @property def gav(self): \"\"\" **[Required]** Gets the gav of", "def application_dependency_node_ids(self, application_dependency_node_ids): \"\"\" Sets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List of (Application", "the artifact is found in the knowledge base. :param is_found_in_knowledge_base: The is_found_in_knowledge_base of", "application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List of (Application Dependencies) node identifiers on which this", "to assign to the node_id property of this ApplicationDependencyVulnerabilitySummary. :type node_id: str :param", "node_id of this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\" return self._node_id @node_id.setter def node_id(self, node_id):", "of this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\" return self._node_id @node_id.setter def node_id(self, node_id): \"\"\"", "@property def application_dependency_node_ids(self): \"\"\" **[Required]** Gets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List of", "Vulnerability represents a single dependency in our application. An Application Dependency Vulnerability can", "assign to the application_dependency_node_ids property of this ApplicationDependencyVulnerabilitySummary. 
:type application_dependency_node_ids: list[str] :param vulnerabilities:", "is_found_in_knowledge_base): \"\"\" Sets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. Indicates if the artifact is", "@property def is_found_in_knowledge_base(self): \"\"\" **[Required]** Gets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. Indicates if", ":type node_id: str :param application_dependency_node_ids: The value to assign to the application_dependency_node_ids property", "Gets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the Application Dependency.", "value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class ApplicationDependencyVulnerabilitySummary(object): \"\"\" An", "= None @property def gav(self): \"\"\" **[Required]** Gets the gav of this ApplicationDependencyVulnerabilitySummary.", "vulnerabilities: The value to assign to the vulnerabilities property of this ApplicationDependencyVulnerabilitySummary. :type", "of vulnerabilities for the Application Dependency. :param vulnerabilities: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary.", "@init_model_state_from_kwargs class ApplicationDependencyVulnerabilitySummary(object): \"\"\" An Application Dependency Vulnerability represents a single dependency in", "of this ApplicationDependencyVulnerabilitySummary. :type gav: str :param node_id: The value to assign to", "artifact is found in the knowledge base. :return: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary.", "node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application Dependency node. :param node_id:", "1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0.", "of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the Application Dependency. :return: The vulnerabilities", "assign to the gav property of this ApplicationDependencyVulnerabilitySummary. :type gav: str :param node_id:", "of this ApplicationDependencyVulnerabilitySummary. :type: bool \"\"\" self._is_found_in_knowledge_base = is_found_in_knowledge_base def __repr__(self): return formatted_flat_dict(self)", "ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version). :param gav: The gav of", "assign to the vulnerabilities property of this ApplicationDependencyVulnerabilitySummary. :type vulnerabilities: list[oci.adm.models.Vulnerability] :param is_found_in_knowledge_base:", "def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False", "choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from", "**[Required]** Gets the gav of this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV) identifier", "is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :type: bool \"\"\" self._is_found_in_knowledge_base = is_found_in_knowledge_base def __repr__(self): return", "of this ApplicationDependencyVulnerabilitySummary. 
:rtype: list[oci.adm.models.Vulnerability] \"\"\" return self._vulnerabilities @vulnerabilities.setter def vulnerabilities(self, vulnerabilities): \"\"\"", "vulnerabilities(self): \"\"\" **[Required]** Gets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for", "'gav', 'node_id': 'nodeId', 'application_dependency_node_ids': 'applicationDependencyNodeIds', 'vulnerabilities': 'vulnerabilities', 'is_found_in_knowledge_base': 'isFoundInKnowledgeBase' } self._gav = None", "identifier of an Application Dependency node. :param node_id: The node_id of this ApplicationDependencyVulnerabilitySummary.", "'str', 'node_id': 'str', 'application_dependency_node_ids': 'list[str]', 'vulnerabilities': 'list[Vulnerability]', 'is_found_in_knowledge_base': 'bool' } self.attribute_map = {", "a nodeId and lists eventual dependencies that this element depends on. \"\"\" def", "assign to the is_found_in_knowledge_base property of this ApplicationDependencyVulnerabilitySummary. :type is_found_in_knowledge_base: bool \"\"\" self.swagger_types", "} self._gav = None self._node_id = None self._application_dependency_node_ids = None self._vulnerabilities = None", "property of this ApplicationDependencyVulnerabilitySummary. :type gav: str :param node_id: The value to assign", "def is_found_in_knowledge_base(self): \"\"\" **[Required]** Gets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. Indicates if the", "return self._gav @gav.setter def gav(self, gav): \"\"\" Sets the gav of this ApplicationDependencyVulnerabilitySummary.", "this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._node_id = node_id @property def application_dependency_node_ids(self): \"\"\" **[Required]**", "(Group:Artifact:Version). :return: The gav of this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\" return self._gav @gav.setter", "of this class): :param gav: The value to assign to the gav property", "\"\"\" Initializes a new ApplicationDependencyVulnerabilitySummary object with values from keyword arguments. The following", "at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel", "is_found_in_knowledge_base def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return", "gav): \"\"\" Sets the gav of this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV)", "None @property def gav(self): \"\"\" **[Required]** Gets the gav of this ApplicationDependencyVulnerabilitySummary. Unique", ":param gav: The value to assign to the gav property of this ApplicationDependencyVulnerabilitySummary.", "'bool' } self.attribute_map = { 'gav': 'gav', 'node_id': 'nodeId', 'application_dependency_node_ids': 'applicationDependencyNodeIds', 'vulnerabilities': 'vulnerabilities',", "List of (Application Dependencies) node identifiers on which this node depends. :return: The", ":param application_dependency_node_ids: The value to assign to the application_dependency_node_ids property of this ApplicationDependencyVulnerabilitySummary.", "is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not", "identifiers on which this node depends. :return: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. 
:rtype:", "self._gav @gav.setter def gav(self, gav): \"\"\" Sets the gav of this ApplicationDependencyVulnerabilitySummary. Unique", "the gav of this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version). :param", "(Application Dependencies) node identifiers on which this node depends. :return: The application_dependency_node_ids of", "= { 'gav': 'str', 'node_id': 'str', 'application_dependency_node_ids': 'list[str]', 'vulnerabilities': 'list[Vulnerability]', 'is_found_in_knowledge_base': 'bool' }", "value to assign to the application_dependency_node_ids property of this ApplicationDependencyVulnerabilitySummary. :type application_dependency_node_ids: list[str]", "The following keyword arguments are supported (corresponding to the getters/setters of this class):", "None self._node_id = None self._application_dependency_node_ids = None self._vulnerabilities = None self._is_found_in_knowledge_base = None", "the artifact is found in the knowledge base. :return: The is_found_in_knowledge_base of this", "All rights reserved. # This software is dual-licensed to you under the Universal", "is_found_in_knowledge_base(self): \"\"\" **[Required]** Gets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. Indicates if the artifact", "'gav': 'gav', 'node_id': 'nodeId', 'application_dependency_node_ids': 'applicationDependencyNodeIds', 'vulnerabilities': 'vulnerabilities', 'is_found_in_knowledge_base': 'isFoundInKnowledgeBase' } self._gav =", "the knowledge base. :return: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :rtype: bool \"\"\" return", "formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return self.__dict__ ==", "of this ApplicationDependencyVulnerabilitySummary. List of (Application Dependencies) node identifiers on which this node", "str \"\"\" return self._node_id @node_id.setter def node_id(self, node_id): \"\"\" Sets the node_id of", "an Application Dependency node. :param node_id: The node_id of this ApplicationDependencyVulnerabilitySummary. :type: str", ":type: str \"\"\" self._node_id = node_id @property def application_dependency_node_ids(self): \"\"\" **[Required]** Gets the", "to the vulnerabilities property of this ApplicationDependencyVulnerabilitySummary. :type vulnerabilities: list[oci.adm.models.Vulnerability] :param is_found_in_knowledge_base: The", "ApplicationDependencyVulnerabilitySummary. :type node_id: str :param application_dependency_node_ids: The value to assign to the application_dependency_node_ids", "either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators", "node_id(self, node_id): \"\"\" Sets the node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier of an", "this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._gav = gav @property def node_id(self): \"\"\" **[Required]**", "Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as", "str \"\"\" self._gav = gav @property def node_id(self): \"\"\" **[Required]** Gets the node_id", "Gets the node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application Dependency node.", "@is_found_in_knowledge_base.setter def is_found_in_knowledge_base(self, is_found_in_knowledge_base): \"\"\" Sets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. 
Indicates if", "list[oci.adm.models.Vulnerability] :param is_found_in_knowledge_base: The value to assign to the is_found_in_knowledge_base property of this", ":param gav: The gav of this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._gav = gav", "of this ApplicationDependencyVulnerabilitySummary. :rtype: bool \"\"\" return self._is_found_in_knowledge_base @is_found_in_knowledge_base.setter def is_found_in_knowledge_base(self, is_found_in_knowledge_base): \"\"\"", "F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class ApplicationDependencyVulnerabilitySummary(object): \"\"\" An Application Dependency Vulnerability", "gav: The value to assign to the gav property of this ApplicationDependencyVulnerabilitySummary. :type", "class ApplicationDependencyVulnerabilitySummary(object): \"\"\" An Application Dependency Vulnerability represents a single dependency in our", "node depends. :return: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :rtype: list[str] \"\"\" return self._application_dependency_node_ids", "this class): :param gav: The value to assign to the gav property of", "getters/setters of this class): :param gav: The value to assign to the gav", "Dependency node. :param node_id: The node_id of this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._node_id", "is uniquely defined by a nodeId and lists eventual dependencies that this element", "to assign to the gav property of this ApplicationDependencyVulnerabilitySummary. :type gav: str :param", "value to assign to the node_id property of this ApplicationDependencyVulnerabilitySummary. :type node_id: str", "the vulnerabilities property of this ApplicationDependencyVulnerabilitySummary. :type vulnerabilities: list[oci.adm.models.Vulnerability] :param is_found_in_knowledge_base: The value", "**[Required]** Gets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. Indicates if the artifact is found", "\"\"\" Sets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. Indicates if the artifact is found", "'applicationDependencyNodeIds', 'vulnerabilities': 'vulnerabilities', 'is_found_in_knowledge_base': 'isFoundInKnowledgeBase' } self._gav = None self._node_id = None self._application_dependency_node_ids", "list[oci.adm.models.Vulnerability] \"\"\" return self._vulnerabilities @vulnerabilities.setter def vulnerabilities(self, vulnerabilities): \"\"\" Sets the vulnerabilities of", "gav @property def node_id(self): \"\"\" **[Required]** Gets the node_id of this ApplicationDependencyVulnerabilitySummary. Unique", "rights reserved. 
# This software is dual-licensed to you under the Universal Permissive", "return self._node_id @node_id.setter def node_id(self, node_id): \"\"\" Sets the node_id of this ApplicationDependencyVulnerabilitySummary.", "None self._vulnerabilities = None self._is_found_in_knowledge_base = None @property def gav(self): \"\"\" **[Required]** Gets", "self._vulnerabilities = vulnerabilities @property def is_found_in_knowledge_base(self): \"\"\" **[Required]** Gets the is_found_in_knowledge_base of this", "self._is_found_in_knowledge_base = is_found_in_knowledge_base def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is", "self._vulnerabilities = None self._is_found_in_knowledge_base = None @property def gav(self): \"\"\" **[Required]** Gets the", "\"\"\" self._application_dependency_node_ids = application_dependency_node_ids @property def vulnerabilities(self): \"\"\" **[Required]** Gets the vulnerabilities of", "return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self ==", "2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import", "the Application Dependency. :param vulnerabilities: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary. :type: list[oci.adm.models.Vulnerability] \"\"\"", "'is_found_in_knowledge_base': 'isFoundInKnowledgeBase' } self._gav = None self._node_id = None self._application_dependency_node_ids = None self._vulnerabilities", "None self._application_dependency_node_ids = None self._vulnerabilities = None self._is_found_in_knowledge_base = None @property def gav(self):", "(c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. # This software", "ApplicationDependencyVulnerabilitySummary. :type gav: str :param node_id: The value to assign to the node_id", "shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL,", "gav of this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version). :return: The", "knowledge base. :return: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :rtype: bool \"\"\" return self._is_found_in_knowledge_base", "def node_id(self): \"\"\" **[Required]** Gets the node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier of", "assign to the node_id property of this ApplicationDependencyVulnerabilitySummary. :type node_id: str :param application_dependency_node_ids:", "'application_dependency_node_ids': 'applicationDependencyNodeIds', 'vulnerabilities': 'vulnerabilities', 'is_found_in_knowledge_base': 'isFoundInKnowledgeBase' } self._gav = None self._node_id = None", "Dependency node. :return: The node_id of this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\" return self._node_id", "is found in the knowledge base. :param is_found_in_knowledge_base: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary.", "\"\"\" return self._node_id @node_id.setter def node_id(self, node_id): \"\"\" Sets the node_id of this", "vulnerabilities): \"\"\" Sets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the", "of this ApplicationDependencyVulnerabilitySummary. :type is_found_in_knowledge_base: bool \"\"\" self.swagger_types = { 'gav': 'str', 'node_id':", "ApplicationDependencyVulnerabilitySummary. 
:type: list[oci.adm.models.Vulnerability] \"\"\" self._vulnerabilities = vulnerabilities @property def is_found_in_knowledge_base(self): \"\"\" **[Required]** Gets", "The node_id of this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\" return self._node_id @node_id.setter def node_id(self,", "List of vulnerabilities for the Application Dependency. :param vulnerabilities: The vulnerabilities of this", "oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs", "dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at", "Version (GAV) identifier (Group:Artifact:Version). :param gav: The gav of this ApplicationDependencyVulnerabilitySummary. :type: str", "the node_id property of this ApplicationDependencyVulnerabilitySummary. :type node_id: str :param application_dependency_node_ids: The value", "on which this node depends. :return: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :rtype: list[str]", "affiliates. All rights reserved. # This software is dual-licensed to you under the", "as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict,", "to the gav property of this ApplicationDependencyVulnerabilitySummary. :type gav: str :param node_id: The", "with values from keyword arguments. The following keyword arguments are supported (corresponding to", "to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl", "list[oci.adm.models.Vulnerability] \"\"\" self._vulnerabilities = vulnerabilities @property def is_found_in_knowledge_base(self): \"\"\" **[Required]** Gets the is_found_in_knowledge_base", "# This software is dual-licensed to you under the Universal Permissive License (UPL)", ":return: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. :rtype: list[str] \"\"\" return self._application_dependency_node_ids @application_dependency_node_ids.setter def", "this ApplicationDependencyVulnerabilitySummary. Indicates if the artifact is found in the knowledge base. :param", "found in the knowledge base. :param is_found_in_knowledge_base: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :type:", "keyword arguments are supported (corresponding to the getters/setters of this class): :param gav:", ":rtype: list[str] \"\"\" return self._application_dependency_node_ids @application_dependency_node_ids.setter def application_dependency_node_ids(self, application_dependency_node_ids): \"\"\" Sets the application_dependency_node_ids", "is_found_in_knowledge_base: The value to assign to the is_found_in_knowledge_base property of this ApplicationDependencyVulnerabilitySummary. :type", "ApplicationDependencyVulnerabilitySummary. :type is_found_in_knowledge_base: bool \"\"\" self.swagger_types = { 'gav': 'str', 'node_id': 'str', 'application_dependency_node_ids':", "self._gav = None self._node_id = None self._application_dependency_node_ids = None self._vulnerabilities = None self._is_found_in_knowledge_base", "Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to", "found in the knowledge base. :return: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. 
:rtype: bool", "# noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class ApplicationDependencyVulnerabilitySummary(object): \"\"\" An Application", "The gav of this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\" self._gav = gav @property def", "of an Application Dependency node. :return: The node_id of this ApplicationDependencyVulnerabilitySummary. :rtype: str", "Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. # This", "the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List of (Application Dependencies) node identifiers on which", "= { 'gav': 'gav', 'node_id': 'nodeId', 'application_dependency_node_ids': 'applicationDependencyNodeIds', 'vulnerabilities': 'vulnerabilities', 'is_found_in_knowledge_base': 'isFoundInKnowledgeBase' }", "in the knowledge base. :param is_found_in_knowledge_base: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. :type: bool", "str :param application_dependency_node_ids: The value to assign to the application_dependency_node_ids property of this", ":return: The gav of this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\" return self._gav @gav.setter def", "ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version). :return: The gav of this", "\"\"\" Sets the node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application Dependency", "self._gav = gav @property def node_id(self): \"\"\" **[Required]** Gets the node_id of this", "**kwargs): \"\"\" Initializes a new ApplicationDependencyVulnerabilitySummary object with values from keyword arguments. The", "str :param node_id: The value to assign to the node_id property of this", "str \"\"\" return self._gav @gav.setter def gav(self, gav): \"\"\" Sets the gav of", "following keyword arguments are supported (corresponding to the getters/setters of this class): :param", ":return: The node_id of this ApplicationDependencyVulnerabilitySummary. :rtype: str \"\"\" return self._node_id @node_id.setter def", "application_dependency_node_ids: The value to assign to the application_dependency_node_ids property of this ApplicationDependencyVulnerabilitySummary. :type", "dependency in our application. An Application Dependency Vulnerability can be associated with eventual", "is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary. Indicates if the artifact is found in the knowledge", "the node_id of this ApplicationDependencyVulnerabilitySummary. Unique identifier of an Application Dependency node. :param", "the gav of this ApplicationDependencyVulnerabilitySummary. Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version). :return:", ":type: str \"\"\" self._gav = gav @property def node_id(self): \"\"\" **[Required]** Gets the", "gav property of this ApplicationDependencyVulnerabilitySummary. :type gav: str :param node_id: The value to", "Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0", "False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other", "ApplicationDependencyVulnerabilitySummary. List of vulnerabilities for the Application Dependency. 
:param vulnerabilities: The vulnerabilities of", ":rtype: list[oci.adm.models.Vulnerability] \"\"\" return self._vulnerabilities @vulnerabilities.setter def vulnerabilities(self, vulnerabilities): \"\"\" Sets the vulnerabilities", "application_dependency_node_ids(self): \"\"\" **[Required]** Gets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary. List of (Application Dependencies)", "{ 'gav': 'str', 'node_id': 'str', 'application_dependency_node_ids': 'list[str]', 'vulnerabilities': 'list[Vulnerability]', 'is_found_in_knowledge_base': 'bool' } self.attribute_map", "this ApplicationDependencyVulnerabilitySummary. :type: list[str] \"\"\" self._application_dependency_node_ids = application_dependency_node_ids @property def vulnerabilities(self): \"\"\" **[Required]**", "the getters/setters of this class): :param gav: The value to assign to the", "to the node_id property of this ApplicationDependencyVulnerabilitySummary. :type node_id: str :param application_dependency_node_ids: The", "(GAV) identifier (Group:Artifact:Version). :param gav: The gav of this ApplicationDependencyVulnerabilitySummary. :type: str \"\"\"", "keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of" ]
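The fragments in the list above all come from the OCI ADM model class ApplicationDependencyVulnerabilitySummary, whose documented fields are gav, node_id, application_dependency_node_ids, vulnerabilities, and is_found_in_knowledge_base, initialized from keyword arguments. A minimal sketch of that usage, assuming an oci Python SDK version that ships the ADM models under the oci.adm.models path referenced in the fragments; the concrete field values below are hypothetical illustrations, not taken from the dataset row:

    # Sketch only: field values are made up; the class and keyword-argument
    # initialization pattern come from the fragments above.
    from oci.adm.models import ApplicationDependencyVulnerabilitySummary

    summary = ApplicationDependencyVulnerabilitySummary(
        gav="org.example:demo-lib:1.0.0",            # Group:Artifact:Version identifier
        node_id="node-1",                            # unique Application Dependency node id
        application_dependency_node_ids=["node-2"],  # node ids this node depends on
        vulnerabilities=[],                          # list of oci.adm.models.Vulnerability
        is_found_in_knowledge_base=True,             # artifact known to the knowledge base
    )
    print(summary)  # __repr__ renders the model via formatted_flat_dict, per the fragments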
[ "= \"topic: feature\" SENDER_EMAIL = \"Typing Bot <<EMAIL>>\" RECEIVER_EMAIL = \"<EMAIL>\" @dataclass class", "= [] updated = [] for issue in issues: if issue.created.date() >= since:", "s = f\"#{issue.number:<5} \" if issue.pull_request: s += \"[PR] \" s += f\"{issue.title}\\n\"", "week, excluding closed issues.\\n\\n\" \"---------------------------------------------------\\n\\n\" ) if len(new) > 0: s += \"The", "j[\"title\"] url = j[\"html_url\"] created_at = datetime.datetime.fromisoformat(j[\"created_at\"][:-1]) user = j[\"user\"][\"login\"] pull_request = \"pull_request\"", "pull requests were opened last week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for issue in", "<<EMAIL>>\" RECEIVER_EMAIL = \"<EMAIL>\" @dataclass class Issue: number: int title: str url: str", "f\"{issue.title}\\n\" s += f\" opened by @{issue.user}\\n\" s += f\" {issue.url}\\n\" return s", "datetime.datetime user: str pull_request: bool = False def main() -> None: since =", "ISSUES_LABEL, \"since\": f\"{since:%Y-%m-%d}T00:00:00Z\", \"per_page\": \"100\", \"state\": \"open\", }, headers={\"Accept\": \"application/vnd.github.v3+json\"}, ).json() assert isinstance(j,", "Opened and changed typing issues week {since:%G-W%V}\") print() print(generate_mail(new, changed)) def generate_mail(new: Sequence[Issue],", "= [] for issue in issues: if issue.created.date() >= since: new.append(issue) else: updated.append(issue)", "maintained by @srittau. \"\"\" from __future__ import annotations import datetime from dataclasses import", "the typing-sig mailing list. Due to limitation with GitHub Actions, the mail is", "Any) -> Issue: number = j[\"number\"] title = j[\"title\"] url = j[\"html_url\"] created_at", "changed) s += \"\\n---------------------------------------------------\\n\\n\" s += ( \"All issues and pull requests with", "list[Issue]: \"\"\"Return (new, updated) issues.\"\"\" j = requests.get( ISSUES_API_URL, params={ \"labels\": ISSUES_LABEL, \"since\":", "in the\\n\" \"typing repository on GitHub with the label 'topic: feature'\\n\" \"that were", "\"per_page\": \"100\", \"state\": \"open\", }, headers={\"Accept\": \"application/vnd.github.v3+json\"}, ).json() assert isinstance(j, list) return [parse_issue(j_i)", "url: str created: datetime.datetime user: str pull_request: bool = False def main() ->", "pull requests were updated last week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for issue in", "= j[\"user\"][\"login\"] pull_request = \"pull_request\" in j assert isinstance(number, int) assert isinstance(title, str)", "opened or updated last week, excluding closed issues.\\n\\n\" \"---------------------------------------------------\\n\\n\" ) if len(new) >", "= j[\"number\"] title = j[\"title\"] url = j[\"html_url\"] created_at = datetime.datetime.fromisoformat(j[\"created_at\"][:-1]) user =", "print(f\"From: {SENDER_EMAIL}\") print(f\"To: {RECEIVER_EMAIL}\") print(f\"Subject: Opened and changed typing issues week {since:%G-W%V}\") print()", "def fetch_issues(since: datetime.date) -> list[Issue]: \"\"\"Return (new, updated) issues.\"\"\" j = requests.get( ISSUES_API_URL,", "issue in issues: if issue.created.date() >= since: new.append(issue) else: updated.append(issue) new.sort(key=lambda i: i.number)", "len(changed) == 0: s = ( \"No issues or pull requests with the", "requests were updated last week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for issue in changed)", "Iterable[Issue], since: datetime.date ) -> tuple[list[Issue], list[Issue]]: new = [] updated = []", "on GitHub.\\n\\n\" ) 
else: s = ( \"The following is an overview of", "following URL:\\n\\n\" ) s += ISSUES_URL return s def generate_issue_text(issue: Issue) -> str:", "user = j[\"user\"][\"login\"] pull_request = \"pull_request\" in j assert isinstance(number, int) assert isinstance(title,", "list of new and changed issues and is sent each Monday at 0200", "requests in the\\n\" \"typing repository on GitHub with the label 'topic: feature'\\n\" \"that", "= datetime.date.today() return today - datetime.timedelta(days=today.weekday() + 7) def fetch_issues(since: datetime.date) -> list[Issue]:", "updated last week in the typing repository on GitHub.\\n\\n\" ) else: s =", "updated = split_issues(issues, since) print_summary(since, new, updated) def previous_week_start() -> datetime.date: today =", "def print_summary( since: datetime.date, new: Sequence[Issue], changed: Sequence[Issue] ) -> None: print(f\"From: {SENDER_EMAIL}\")", "import dataclass from typing import Any, Iterable, Sequence import requests ISSUES_API_URL = \"https://api.github.com/repos/python/typing/issues\"", "- datetime.timedelta(days=today.weekday() + 7) def fetch_issues(since: datetime.date) -> list[Issue]: \"\"\"Return (new, updated) issues.\"\"\"", "#!/usr/bin/env python3 \"\"\" Generate a summary of last week's issues tagged with \"topic:", "datetime.datetime.fromisoformat(j[\"created_at\"][:-1]) user = j[\"user\"][\"login\"] pull_request = \"pull_request\" in j assert isinstance(number, int) assert", "generate_mail(new: Sequence[Issue], changed: Sequence[Issue]) -> str: if len(new) == 0 and len(changed) ==", "requests were opened last week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for issue in new)", "requests with the label 'topic: feature' were opened\\n\" \"or updated last week in", "under the following URL:\\n\\n\" ) s += ISSUES_URL return s def generate_issue_text(issue: Issue)", "{since:%G-W%V}\") print() print(generate_mail(new, changed)) def generate_mail(new: Sequence[Issue], changed: Sequence[Issue]) -> str: if len(new)", "previous_week_start() issues = fetch_issues(since) new, updated = split_issues(issues, since) print_summary(since, new, updated) def", "\"open\", }, headers={\"Accept\": \"application/vnd.github.v3+json\"}, ).json() assert isinstance(j, list) return [parse_issue(j_i) for j_i in", "s += \"\\n---------------------------------------------------\\n\\n\" s += ( \"All issues and pull requests with the", "isinstance(url, str) assert isinstance(user, str) return Issue(number, title, url, created_at, user, pull_request) def", "[parse_issue(j_i) for j_i in j] def parse_issue(j: Any) -> Issue: number = j[\"number\"]", "== 0: s = ( \"No issues or pull requests with the label", "\"state\": \"open\", }, headers={\"Accept\": \"application/vnd.github.v3+json\"}, ).json() assert isinstance(j, list) return [parse_issue(j_i) for j_i", "dataclass from typing import Any, Iterable, Sequence import requests ISSUES_API_URL = \"https://api.github.com/repos/python/typing/issues\" ISSUES_URL", "params={ \"labels\": ISSUES_LABEL, \"since\": f\"{since:%Y-%m-%d}T00:00:00Z\", \"per_page\": \"100\", \"state\": \"open\", }, headers={\"Accept\": \"application/vnd.github.v3+json\"}, ).json()", "str) assert isinstance(user, str) return Issue(number, title, url, created_at, user, pull_request) def split_issues(", "user, pull_request) def split_issues( issues: Iterable[Issue], since: datetime.date ) -> tuple[list[Issue], list[Issue]]: new", "changed: Sequence[Issue] ) -> None: print(f\"From: {SENDER_EMAIL}\") print(f\"To: 
{RECEIVER_EMAIL}\") print(f\"Subject: Opened and changed", "opened last week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for issue in new) s +=", "with the label 'topic: feature'\\n\" \"can be viewed under the following URL:\\n\\n\" )", "\"\"\" Generate a summary of last week's issues tagged with \"topic: feature\". The", "return new, updated def print_summary( since: datetime.date, new: Sequence[Issue], changed: Sequence[Issue] ) ->", "server, currently maintained by @srittau. \"\"\" from __future__ import annotations import datetime from", "to the typing-sig mailing list. Due to limitation with GitHub Actions, the mail", "title = j[\"title\"] url = j[\"html_url\"] created_at = datetime.datetime.fromisoformat(j[\"created_at\"][:-1]) user = j[\"user\"][\"login\"] pull_request", "Due to limitation with GitHub Actions, the mail is sent from a private", "were updated last week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for issue in changed) s", "f\" opened by @{issue.user}\\n\" s += f\" {issue.url}\\n\" return s if __name__ ==", "str: s = f\"#{issue.number:<5} \" if issue.pull_request: s += \"[PR] \" s +=", "}, headers={\"Accept\": \"application/vnd.github.v3+json\"}, ).json() assert isinstance(j, list) return [parse_issue(j_i) for j_i in j]", "typing repository on GitHub.\\n\\n\" ) else: s = ( \"The following is an", "since) print_summary(since, new, updated) def previous_week_start() -> datetime.date: today = datetime.date.today() return today", "since: datetime.date ) -> tuple[list[Issue], list[Issue]]: new = [] updated = [] for", "= previous_week_start() issues = fetch_issues(since) new, updated = split_issues(issues, since) print_summary(since, new, updated)", "int) assert isinstance(title, str) assert isinstance(url, str) assert isinstance(user, str) return Issue(number, title,", "[] for issue in issues: if issue.created.date() >= since: new.append(issue) else: updated.append(issue) new.sort(key=lambda", "URL:\\n\\n\" ) s += ISSUES_URL return s def generate_issue_text(issue: Issue) -> str: s", "fetch_issues(since) new, updated = split_issues(issues, since) print_summary(since, new, updated) def previous_week_start() -> datetime.date:", "str created: datetime.datetime user: str pull_request: bool = False def main() -> None:", "s def generate_issue_text(issue: Issue) -> str: s = f\"#{issue.number:<5} \" if issue.pull_request: s", "int title: str url: str created: datetime.datetime user: str pull_request: bool = False", "return Issue(number, title, url, created_at, user, pull_request) def split_issues( issues: Iterable[Issue], since: datetime.date", "created: datetime.datetime user: str pull_request: bool = False def main() -> None: since", "The summary will include a list of new and changed issues and is", "for issue in changed) s += \"\\n---------------------------------------------------\\n\\n\" s += ( \"All issues and", "-> list[Issue]: \"\"\"Return (new, updated) issues.\"\"\" j = requests.get( ISSUES_API_URL, params={ \"labels\": ISSUES_LABEL,", "\"that were opened or updated last week, excluding closed issues.\\n\\n\" \"---------------------------------------------------\\n\\n\" ) if", "issues and pull requests were opened last week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for", "issues and pull requests were updated last week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for", "tuple[list[Issue], list[Issue]]: new = [] updated = [] for issue in issues: if", "url, created_at, user, pull_request) def split_issues( issues: Iterable[Issue], since: 
datetime.date ) -> tuple[list[Issue],", "for issue in new) s += \"\\n---------------------------------------------------\\n\\n\" if len(changed) > 0: s +=", "mailing list. Due to limitation with GitHub Actions, the mail is sent from", ">= since: new.append(issue) else: updated.append(issue) new.sort(key=lambda i: i.number) updated.sort(key=lambda i: i.number) return new,", "Actions, the mail is sent from a private server, currently maintained by @srittau.", "0: s += \"The following issues and pull requests were opened last week:", "feature' were opened\\n\" \"or updated last week in the typing repository on GitHub.\\n\\n\"", "requests ISSUES_API_URL = \"https://api.github.com/repos/python/typing/issues\" ISSUES_URL = \"https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22\" ISSUES_LABEL = \"topic: feature\" SENDER_EMAIL =", "\\n\\n\" s += \"\".join(generate_issue_text(issue) for issue in new) s += \"\\n---------------------------------------------------\\n\\n\" if len(changed)", "issues.\"\"\" j = requests.get( ISSUES_API_URL, params={ \"labels\": ISSUES_LABEL, \"since\": f\"{since:%Y-%m-%d}T00:00:00Z\", \"per_page\": \"100\", \"state\":", "i.number) updated.sort(key=lambda i: i.number) return new, updated def print_summary( since: datetime.date, new: Sequence[Issue],", "headers={\"Accept\": \"application/vnd.github.v3+json\"}, ).json() assert isinstance(j, list) return [parse_issue(j_i) for j_i in j] def", "main() -> None: since = previous_week_start() issues = fetch_issues(since) new, updated = split_issues(issues,", "+= \"The following issues and pull requests were opened last week: \\n\\n\" s", "Sequence[Issue], changed: Sequence[Issue]) -> str: if len(new) == 0 and len(changed) == 0:", "print_summary( since: datetime.date, new: Sequence[Issue], changed: Sequence[Issue] ) -> None: print(f\"From: {SENDER_EMAIL}\") print(f\"To:", "\" s += f\"{issue.title}\\n\" s += f\" opened by @{issue.user}\\n\" s += f\"", "s += \"[PR] \" s += f\"{issue.title}\\n\" s += f\" opened by @{issue.user}\\n\"", "ISSUES_API_URL, params={ \"labels\": ISSUES_LABEL, \"since\": f\"{since:%Y-%m-%d}T00:00:00Z\", \"per_page\": \"100\", \"state\": \"open\", }, headers={\"Accept\": \"application/vnd.github.v3+json\"},", "if len(new) == 0 and len(changed) == 0: s = ( \"No issues", "mail is sent from a private server, currently maintained by @srittau. \"\"\" from", "created_at, user, pull_request) def split_issues( issues: Iterable[Issue], since: datetime.date ) -> tuple[list[Issue], list[Issue]]:", "+= f\"{issue.title}\\n\" s += f\" opened by @{issue.user}\\n\" s += f\" {issue.url}\\n\" return", "issues or pull requests with the label 'topic: feature' were opened\\n\" \"or updated", "str url: str created: datetime.datetime user: str pull_request: bool = False def main()", "import requests ISSUES_API_URL = \"https://api.github.com/repos/python/typing/issues\" ISSUES_URL = \"https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22\" ISSUES_LABEL = \"topic: feature\" SENDER_EMAIL", "feature\". 
The summary will include a list of new and changed issues and", "feature'\\n\" \"that were opened or updated last week, excluding closed issues.\\n\\n\" \"---------------------------------------------------\\n\\n\" )", "in changed) s += \"\\n---------------------------------------------------\\n\\n\" s += ( \"All issues and pull requests", "return [parse_issue(j_i) for j_i in j] def parse_issue(j: Any) -> Issue: number =", "requests.get( ISSUES_API_URL, params={ \"labels\": ISSUES_LABEL, \"since\": f\"{since:%Y-%m-%d}T00:00:00Z\", \"per_page\": \"100\", \"state\": \"open\", }, headers={\"Accept\":", "new, updated def print_summary( since: datetime.date, new: Sequence[Issue], changed: Sequence[Issue] ) -> None:", "if issue.created.date() >= since: new.append(issue) else: updated.append(issue) new.sort(key=lambda i: i.number) updated.sort(key=lambda i: i.number)", "+= \"\\n---------------------------------------------------\\n\\n\" if len(changed) > 0: s += \"The following issues and pull", "following is an overview of all issues and pull requests in the\\n\" \"typing", "fetch_issues(since: datetime.date) -> list[Issue]: \"\"\"Return (new, updated) issues.\"\"\" j = requests.get( ISSUES_API_URL, params={", "python3 \"\"\" Generate a summary of last week's issues tagged with \"topic: feature\".", "or updated last week, excluding closed issues.\\n\\n\" \"---------------------------------------------------\\n\\n\" ) if len(new) > 0:", "\"\\n---------------------------------------------------\\n\\n\" if len(changed) > 0: s += \"The following issues and pull requests", "in issues: if issue.created.date() >= since: new.append(issue) else: updated.append(issue) new.sort(key=lambda i: i.number) updated.sort(key=lambda", "+= \"[PR] \" s += f\"{issue.title}\\n\" s += f\" opened by @{issue.user}\\n\" s", "created_at = datetime.datetime.fromisoformat(j[\"created_at\"][:-1]) user = j[\"user\"][\"login\"] pull_request = \"pull_request\" in j assert isinstance(number,", "\"labels\": ISSUES_LABEL, \"since\": f\"{since:%Y-%m-%d}T00:00:00Z\", \"per_page\": \"100\", \"state\": \"open\", }, headers={\"Accept\": \"application/vnd.github.v3+json\"}, ).json() assert", "\"---------------------------------------------------\\n\\n\" ) if len(new) > 0: s += \"The following issues and pull", "\"All issues and pull requests with the label 'topic: feature'\\n\" \"can be viewed", "= \"https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22\" ISSUES_LABEL = \"topic: feature\" SENDER_EMAIL = \"Typing Bot <<EMAIL>>\" RECEIVER_EMAIL =", "\" if issue.pull_request: s += \"[PR] \" s += f\"{issue.title}\\n\" s += f\"", "new and changed issues and is sent each Monday at 0200 CE(S)T to", "__future__ import annotations import datetime from dataclasses import dataclass from typing import Any,", "+= f\" opened by @{issue.user}\\n\" s += f\" {issue.url}\\n\" return s if __name__", "if len(new) > 0: s += \"The following issues and pull requests were", "is sent from a private server, currently maintained by @srittau. 
\"\"\" from __future__", "\"or updated last week in the typing repository on GitHub.\\n\\n\" ) else: s", "updated) def previous_week_start() -> datetime.date: today = datetime.date.today() return today - datetime.timedelta(days=today.weekday() +", "datetime.date ) -> tuple[list[Issue], list[Issue]]: new = [] updated = [] for issue", "updated.sort(key=lambda i: i.number) return new, updated def print_summary( since: datetime.date, new: Sequence[Issue], changed:", "s += f\" opened by @{issue.user}\\n\" s += f\" {issue.url}\\n\" return s if", "a list of new and changed issues and is sent each Monday at", "str: if len(new) == 0 and len(changed) == 0: s = ( \"No", "if issue.pull_request: s += \"[PR] \" s += f\"{issue.title}\\n\" s += f\" opened", "and pull requests were opened last week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for issue", "import annotations import datetime from dataclasses import dataclass from typing import Any, Iterable,", "= \"pull_request\" in j assert isinstance(number, int) assert isinstance(title, str) assert isinstance(url, str)", "Sequence[Issue], changed: Sequence[Issue] ) -> None: print(f\"From: {SENDER_EMAIL}\") print(f\"To: {RECEIVER_EMAIL}\") print(f\"Subject: Opened and", "= \"<EMAIL>\" @dataclass class Issue: number: int title: str url: str created: datetime.datetime", "\\n\\n\" s += \"\".join(generate_issue_text(issue) for issue in changed) s += \"\\n---------------------------------------------------\\n\\n\" s +=", "typing-sig mailing list. Due to limitation with GitHub Actions, the mail is sent", "-> tuple[list[Issue], list[Issue]]: new = [] updated = [] for issue in issues:", "print_summary(since, new, updated) def previous_week_start() -> datetime.date: today = datetime.date.today() return today -", "new: Sequence[Issue], changed: Sequence[Issue] ) -> None: print(f\"From: {SENDER_EMAIL}\") print(f\"To: {RECEIVER_EMAIL}\") print(f\"Subject: Opened", "print(f\"To: {RECEIVER_EMAIL}\") print(f\"Subject: Opened and changed typing issues week {since:%G-W%V}\") print() print(generate_mail(new, changed))", "f\"#{issue.number:<5} \" if issue.pull_request: s += \"[PR] \" s += f\"{issue.title}\\n\" s +=", "repository on GitHub with the label 'topic: feature'\\n\" \"that were opened or updated", "s += \"\\n---------------------------------------------------\\n\\n\" if len(changed) > 0: s += \"The following issues and", "title: str url: str created: datetime.datetime user: str pull_request: bool = False def", "None: since = previous_week_start() issues = fetch_issues(since) new, updated = split_issues(issues, since) print_summary(since,", "for j_i in j] def parse_issue(j: Any) -> Issue: number = j[\"number\"] title", "datetime.date) -> list[Issue]: \"\"\"Return (new, updated) issues.\"\"\" j = requests.get( ISSUES_API_URL, params={ \"labels\":", "isinstance(j, list) return [parse_issue(j_i) for j_i in j] def parse_issue(j: Any) -> Issue:", "str) return Issue(number, title, url, created_at, user, pull_request) def split_issues( issues: Iterable[Issue], since:", "list. Due to limitation with GitHub Actions, the mail is sent from a", "Generate a summary of last week's issues tagged with \"topic: feature\". 
The summary", "= split_issues(issues, since) print_summary(since, new, updated) def previous_week_start() -> datetime.date: today = datetime.date.today()", "= j[\"html_url\"] created_at = datetime.datetime.fromisoformat(j[\"created_at\"][:-1]) user = j[\"user\"][\"login\"] pull_request = \"pull_request\" in j", "last week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for issue in changed) s += \"\\n---------------------------------------------------\\n\\n\"", "week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for issue in changed) s += \"\\n---------------------------------------------------\\n\\n\" s", "pull_request) def split_issues( issues: Iterable[Issue], since: datetime.date ) -> tuple[list[Issue], list[Issue]]: new =", "Sequence import requests ISSUES_API_URL = \"https://api.github.com/repos/python/typing/issues\" ISSUES_URL = \"https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22\" ISSUES_LABEL = \"topic: feature\"", "str) assert isinstance(url, str) assert isinstance(user, str) return Issue(number, title, url, created_at, user,", "Any, Iterable, Sequence import requests ISSUES_API_URL = \"https://api.github.com/repos/python/typing/issues\" ISSUES_URL = \"https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22\" ISSUES_LABEL =", "\"application/vnd.github.v3+json\"}, ).json() assert isinstance(j, list) return [parse_issue(j_i) for j_i in j] def parse_issue(j:", "0: s = ( \"No issues or pull requests with the label 'topic:", "+= ISSUES_URL return s def generate_issue_text(issue: Issue) -> str: s = f\"#{issue.number:<5} \"", "\"https://api.github.com/repos/python/typing/issues\" ISSUES_URL = \"https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22\" ISSUES_LABEL = \"topic: feature\" SENDER_EMAIL = \"Typing Bot <<EMAIL>>\"", "requests with the label 'topic: feature'\\n\" \"can be viewed under the following URL:\\n\\n\"", "ISSUES_URL return s def generate_issue_text(issue: Issue) -> str: s = f\"#{issue.number:<5} \" if", "since: datetime.date, new: Sequence[Issue], changed: Sequence[Issue] ) -> None: print(f\"From: {SENDER_EMAIL}\") print(f\"To: {RECEIVER_EMAIL}\")", "by @{issue.user}\\n\" s += f\" {issue.url}\\n\" return s if __name__ == \"__main__\": main()", "assert isinstance(url, str) assert isinstance(user, str) return Issue(number, title, url, created_at, user, pull_request)", "s += ISSUES_URL return s def generate_issue_text(issue: Issue) -> str: s = f\"#{issue.number:<5}", "s = ( \"The following is an overview of all issues and pull", "issues and pull requests with the label 'topic: feature'\\n\" \"can be viewed under", "updated def print_summary( since: datetime.date, new: Sequence[Issue], changed: Sequence[Issue] ) -> None: print(f\"From:", "changed issues and is sent each Monday at 0200 CE(S)T to the typing-sig", "\"https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22\" ISSUES_LABEL = \"topic: feature\" SENDER_EMAIL = \"Typing Bot <<EMAIL>>\" RECEIVER_EMAIL = \"<EMAIL>\"", "feature\" SENDER_EMAIL = \"Typing Bot <<EMAIL>>\" RECEIVER_EMAIL = \"<EMAIL>\" @dataclass class Issue: number:", "overview of all issues and pull requests in the\\n\" \"typing repository on GitHub", ") -> None: print(f\"From: {SENDER_EMAIL}\") print(f\"To: {RECEIVER_EMAIL}\") print(f\"Subject: Opened and changed typing issues", "label 'topic: feature' were opened\\n\" \"or updated last week in the typing repository", "week {since:%G-W%V}\") print() print(generate_mail(new, changed)) def generate_mail(new: 
Sequence[Issue], changed: Sequence[Issue]) -> str: if", "class Issue: number: int title: str url: str created: datetime.datetime user: str pull_request:", "+= ( \"All issues and pull requests with the label 'topic: feature'\\n\" \"can", "+ 7) def fetch_issues(since: datetime.date) -> list[Issue]: \"\"\"Return (new, updated) issues.\"\"\" j =", "Issue(number, title, url, created_at, user, pull_request) def split_issues( issues: Iterable[Issue], since: datetime.date )", "for issue in issues: if issue.created.date() >= since: new.append(issue) else: updated.append(issue) new.sort(key=lambda i:", "tagged with \"topic: feature\". The summary will include a list of new and", "\"pull_request\" in j assert isinstance(number, int) assert isinstance(title, str) assert isinstance(url, str) assert", "GitHub Actions, the mail is sent from a private server, currently maintained by", "= f\"#{issue.number:<5} \" if issue.pull_request: s += \"[PR] \" s += f\"{issue.title}\\n\" s", "j[\"user\"][\"login\"] pull_request = \"pull_request\" in j assert isinstance(number, int) assert isinstance(title, str) assert", "Issue) -> str: s = f\"#{issue.number:<5} \" if issue.pull_request: s += \"[PR] \"", "datetime.date, new: Sequence[Issue], changed: Sequence[Issue] ) -> None: print(f\"From: {SENDER_EMAIL}\") print(f\"To: {RECEIVER_EMAIL}\") print(f\"Subject:", "pull_request = \"pull_request\" in j assert isinstance(number, int) assert isinstance(title, str) assert isinstance(url,", "else: updated.append(issue) new.sort(key=lambda i: i.number) updated.sort(key=lambda i: i.number) return new, updated def print_summary(", "[] updated = [] for issue in issues: if issue.created.date() >= since: new.append(issue)", "issues: if issue.created.date() >= since: new.append(issue) else: updated.append(issue) new.sort(key=lambda i: i.number) updated.sort(key=lambda i:", "datetime.date.today() return today - datetime.timedelta(days=today.weekday() + 7) def fetch_issues(since: datetime.date) -> list[Issue]: \"\"\"Return", "title, url, created_at, user, pull_request) def split_issues( issues: Iterable[Issue], since: datetime.date ) ->", "GitHub with the label 'topic: feature'\\n\" \"that were opened or updated last week,", "issue in changed) s += \"\\n---------------------------------------------------\\n\\n\" s += ( \"All issues and pull", "@dataclass class Issue: number: int title: str url: str created: datetime.datetime user: str", "j_i in j] def parse_issue(j: Any) -> Issue: number = j[\"number\"] title =", "list[Issue]]: new = [] updated = [] for issue in issues: if issue.created.date()", "assert isinstance(title, str) assert isinstance(url, str) assert isinstance(user, str) return Issue(number, title, url,", ") else: s = ( \"The following is an overview of all issues", "and changed issues and is sent each Monday at 0200 CE(S)T to the", "the typing repository on GitHub.\\n\\n\" ) else: s = ( \"The following is", "isinstance(number, int) assert isinstance(title, str) assert isinstance(url, str) assert isinstance(user, str) return Issue(number,", "the label 'topic: feature'\\n\" \"that were opened or updated last week, excluding closed", "number: int title: str url: str created: datetime.datetime user: str pull_request: bool =", "new, updated) def previous_week_start() -> datetime.date: today = datetime.date.today() return today - datetime.timedelta(days=today.weekday()", ").json() assert isinstance(j, list) return [parse_issue(j_i) for j_i in j] def parse_issue(j: Any)", "0: s += \"The following issues and pull requests were 
updated last week:", "the mail is sent from a private server, currently maintained by @srittau. \"\"\"", "with the label 'topic: feature' were opened\\n\" \"or updated last week in the", "= \"https://api.github.com/repos/python/typing/issues\" ISSUES_URL = \"https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22\" ISSUES_LABEL = \"topic: feature\" SENDER_EMAIL = \"Typing Bot", "CE(S)T to the typing-sig mailing list. Due to limitation with GitHub Actions, the", "number = j[\"number\"] title = j[\"title\"] url = j[\"html_url\"] created_at = datetime.datetime.fromisoformat(j[\"created_at\"][:-1]) user", "excluding closed issues.\\n\\n\" \"---------------------------------------------------\\n\\n\" ) if len(new) > 0: s += \"The following", "from __future__ import annotations import datetime from dataclasses import dataclass from typing import", "( \"All issues and pull requests with the label 'topic: feature'\\n\" \"can be", "= fetch_issues(since) new, updated = split_issues(issues, since) print_summary(since, new, updated) def previous_week_start() ->", "new, updated = split_issues(issues, since) print_summary(since, new, updated) def previous_week_start() -> datetime.date: today", "j = requests.get( ISSUES_API_URL, params={ \"labels\": ISSUES_LABEL, \"since\": f\"{since:%Y-%m-%d}T00:00:00Z\", \"per_page\": \"100\", \"state\": \"open\",", "the label 'topic: feature' were opened\\n\" \"or updated last week in the typing", "= requests.get( ISSUES_API_URL, params={ \"labels\": ISSUES_LABEL, \"since\": f\"{since:%Y-%m-%d}T00:00:00Z\", \"per_page\": \"100\", \"state\": \"open\", },", "import datetime from dataclasses import dataclass from typing import Any, Iterable, Sequence import", "in new) s += \"\\n---------------------------------------------------\\n\\n\" if len(changed) > 0: s += \"The following", "= ( \"The following is an overview of all issues and pull requests", "(new, updated) issues.\"\"\" j = requests.get( ISSUES_API_URL, params={ \"labels\": ISSUES_LABEL, \"since\": f\"{since:%Y-%m-%d}T00:00:00Z\", \"per_page\":", "+= \"\".join(generate_issue_text(issue) for issue in changed) s += \"\\n---------------------------------------------------\\n\\n\" s += ( \"All", "with GitHub Actions, the mail is sent from a private server, currently maintained", "and pull requests with the label 'topic: feature'\\n\" \"can be viewed under the", "bool = False def main() -> None: since = previous_week_start() issues = fetch_issues(since)", "\"No issues or pull requests with the label 'topic: feature' were opened\\n\" \"or", "the following URL:\\n\\n\" ) s += ISSUES_URL return s def generate_issue_text(issue: Issue) ->", "len(changed) > 0: s += \"The following issues and pull requests were updated", "-> str: if len(new) == 0 and len(changed) == 0: s = (", "viewed under the following URL:\\n\\n\" ) s += ISSUES_URL return s def generate_issue_text(issue:", "RECEIVER_EMAIL = \"<EMAIL>\" @dataclass class Issue: number: int title: str url: str created:", "return s def generate_issue_text(issue: Issue) -> str: s = f\"#{issue.number:<5} \" if issue.pull_request:", "pull requests with the label 'topic: feature' were opened\\n\" \"or updated last week", "issue in new) s += \"\\n---------------------------------------------------\\n\\n\" if len(changed) > 0: s += \"The", "of new and changed issues and is sent each Monday at 0200 CE(S)T", "summary will include a list of new and changed issues and is sent", "Sequence[Issue] ) -> None: print(f\"From: {SENDER_EMAIL}\") print(f\"To: 
{RECEIVER_EMAIL}\") print(f\"Subject: Opened and changed typing", "following issues and pull requests were opened last week: \\n\\n\" s += \"\".join(generate_issue_text(issue)", "and pull requests in the\\n\" \"typing repository on GitHub with the label 'topic:", "since: new.append(issue) else: updated.append(issue) new.sort(key=lambda i: i.number) updated.sort(key=lambda i: i.number) return new, updated", "\"typing repository on GitHub with the label 'topic: feature'\\n\" \"that were opened or", "new) s += \"\\n---------------------------------------------------\\n\\n\" if len(changed) > 0: s += \"The following issues", "def previous_week_start() -> datetime.date: today = datetime.date.today() return today - datetime.timedelta(days=today.weekday() + 7)", "-> None: print(f\"From: {SENDER_EMAIL}\") print(f\"To: {RECEIVER_EMAIL}\") print(f\"Subject: Opened and changed typing issues week", "typing import Any, Iterable, Sequence import requests ISSUES_API_URL = \"https://api.github.com/repos/python/typing/issues\" ISSUES_URL = \"https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22\"", "changed: Sequence[Issue]) -> str: if len(new) == 0 and len(changed) == 0: s", "pull_request: bool = False def main() -> None: since = previous_week_start() issues =", "\"[PR] \" s += f\"{issue.title}\\n\" s += f\" opened by @{issue.user}\\n\" s +=", "by @srittau. \"\"\" from __future__ import annotations import datetime from dataclasses import dataclass", "a summary of last week's issues tagged with \"topic: feature\". The summary will", "{RECEIVER_EMAIL}\") print(f\"Subject: Opened and changed typing issues week {since:%G-W%V}\") print() print(generate_mail(new, changed)) def", "j] def parse_issue(j: Any) -> Issue: number = j[\"number\"] title = j[\"title\"] url", "i.number) return new, updated def print_summary( since: datetime.date, new: Sequence[Issue], changed: Sequence[Issue] )", "len(new) > 0: s += \"The following issues and pull requests were opened", "'topic: feature'\\n\" \"that were opened or updated last week, excluding closed issues.\\n\\n\" \"---------------------------------------------------\\n\\n\"", "None: print(f\"From: {SENDER_EMAIL}\") print(f\"To: {RECEIVER_EMAIL}\") print(f\"Subject: Opened and changed typing issues week {since:%G-W%V}\")", "issues week {since:%G-W%V}\") print() print(generate_mail(new, changed)) def generate_mail(new: Sequence[Issue], changed: Sequence[Issue]) -> str:", "def generate_issue_text(issue: Issue) -> str: s = f\"#{issue.number:<5} \" if issue.pull_request: s +=", "last week in the typing repository on GitHub.\\n\\n\" ) else: s = (", "and changed typing issues week {since:%G-W%V}\") print() print(generate_mail(new, changed)) def generate_mail(new: Sequence[Issue], changed:", "typing issues week {since:%G-W%V}\") print() print(generate_mail(new, changed)) def generate_mail(new: Sequence[Issue], changed: Sequence[Issue]) ->", "a private server, currently maintained by @srittau. 
\"\"\" from __future__ import annotations import", "generate_issue_text(issue: Issue) -> str: s = f\"#{issue.number:<5} \" if issue.pull_request: s += \"[PR]", "pull requests in the\\n\" \"typing repository on GitHub with the label 'topic: feature'\\n\"", "updated last week, excluding closed issues.\\n\\n\" \"---------------------------------------------------\\n\\n\" ) if len(new) > 0: s", "\"Typing Bot <<EMAIL>>\" RECEIVER_EMAIL = \"<EMAIL>\" @dataclass class Issue: number: int title: str", "def parse_issue(j: Any) -> Issue: number = j[\"number\"] title = j[\"title\"] url =", "+= \"The following issues and pull requests were updated last week: \\n\\n\" s", "of last week's issues tagged with \"topic: feature\". The summary will include a", "changed typing issues week {since:%G-W%V}\") print() print(generate_mail(new, changed)) def generate_mail(new: Sequence[Issue], changed: Sequence[Issue])", "week's issues tagged with \"topic: feature\". The summary will include a list of", "label 'topic: feature'\\n\" \"that were opened or updated last week, excluding closed issues.\\n\\n\"", "Issue: number = j[\"number\"] title = j[\"title\"] url = j[\"html_url\"] created_at = datetime.datetime.fromisoformat(j[\"created_at\"][:-1])", "\"since\": f\"{since:%Y-%m-%d}T00:00:00Z\", \"per_page\": \"100\", \"state\": \"open\", }, headers={\"Accept\": \"application/vnd.github.v3+json\"}, ).json() assert isinstance(j, list)", "changed)) def generate_mail(new: Sequence[Issue], changed: Sequence[Issue]) -> str: if len(new) == 0 and", "private server, currently maintained by @srittau. \"\"\" from __future__ import annotations import datetime", "since = previous_week_start() issues = fetch_issues(since) new, updated = split_issues(issues, since) print_summary(since, new,", "in j] def parse_issue(j: Any) -> Issue: number = j[\"number\"] title = j[\"title\"]", "( \"The following is an overview of all issues and pull requests in", "> 0: s += \"The following issues and pull requests were opened last", "the label 'topic: feature'\\n\" \"can be viewed under the following URL:\\n\\n\" ) s", "print(generate_mail(new, changed)) def generate_mail(new: Sequence[Issue], changed: Sequence[Issue]) -> str: if len(new) == 0", "include a list of new and changed issues and is sent each Monday", "= datetime.datetime.fromisoformat(j[\"created_at\"][:-1]) user = j[\"user\"][\"login\"] pull_request = \"pull_request\" in j assert isinstance(number, int)", "f\"{since:%Y-%m-%d}T00:00:00Z\", \"per_page\": \"100\", \"state\": \"open\", }, headers={\"Accept\": \"application/vnd.github.v3+json\"}, ).json() assert isinstance(j, list) return", "all issues and pull requests in the\\n\" \"typing repository on GitHub with the", "issues: Iterable[Issue], since: datetime.date ) -> tuple[list[Issue], list[Issue]]: new = [] updated =", "ISSUES_LABEL = \"topic: feature\" SENDER_EMAIL = \"Typing Bot <<EMAIL>>\" RECEIVER_EMAIL = \"<EMAIL>\" @dataclass", "last week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for issue in new) s += \"\\n---------------------------------------------------\\n\\n\"", "else: s = ( \"The following is an overview of all issues and", "sent from a private server, currently maintained by @srittau. 
\"\"\" from __future__ import", ") -> tuple[list[Issue], list[Issue]]: new = [] updated = [] for issue in", "closed issues.\\n\\n\" \"---------------------------------------------------\\n\\n\" ) if len(new) > 0: s += \"The following issues", "updated = [] for issue in issues: if issue.created.date() >= since: new.append(issue) else:", "last week, excluding closed issues.\\n\\n\" \"---------------------------------------------------\\n\\n\" ) if len(new) > 0: s +=", "-> None: since = previous_week_start() issues = fetch_issues(since) new, updated = split_issues(issues, since)", "were opened\\n\" \"or updated last week in the typing repository on GitHub.\\n\\n\" )", "\"topic: feature\" SENDER_EMAIL = \"Typing Bot <<EMAIL>>\" RECEIVER_EMAIL = \"<EMAIL>\" @dataclass class Issue:", "'topic: feature' were opened\\n\" \"or updated last week in the typing repository on", "today = datetime.date.today() return today - datetime.timedelta(days=today.weekday() + 7) def fetch_issues(since: datetime.date) ->", "an overview of all issues and pull requests in the\\n\" \"typing repository on", "updated.append(issue) new.sort(key=lambda i: i.number) updated.sort(key=lambda i: i.number) return new, updated def print_summary( since:", "will include a list of new and changed issues and is sent each", "= False def main() -> None: since = previous_week_start() issues = fetch_issues(since) new,", "= ( \"No issues or pull requests with the label 'topic: feature' were", "( \"No issues or pull requests with the label 'topic: feature' were opened\\n\"", "issues = fetch_issues(since) new, updated = split_issues(issues, since) print_summary(since, new, updated) def previous_week_start()", "currently maintained by @srittau. \"\"\" from __future__ import annotations import datetime from dataclasses", "j[\"html_url\"] created_at = datetime.datetime.fromisoformat(j[\"created_at\"][:-1]) user = j[\"user\"][\"login\"] pull_request = \"pull_request\" in j assert", "or pull requests with the label 'topic: feature' were opened\\n\" \"or updated last", "and len(changed) == 0: s = ( \"No issues or pull requests with", "\"100\", \"state\": \"open\", }, headers={\"Accept\": \"application/vnd.github.v3+json\"}, ).json() assert isinstance(j, list) return [parse_issue(j_i) for", "updated) issues.\"\"\" j = requests.get( ISSUES_API_URL, params={ \"labels\": ISSUES_LABEL, \"since\": f\"{since:%Y-%m-%d}T00:00:00Z\", \"per_page\": \"100\",", "7) def fetch_issues(since: datetime.date) -> list[Issue]: \"\"\"Return (new, updated) issues.\"\"\" j = requests.get(", "0200 CE(S)T to the typing-sig mailing list. Due to limitation with GitHub Actions,", "\"The following is an overview of all issues and pull requests in the\\n\"", "def split_issues( issues: Iterable[Issue], since: datetime.date ) -> tuple[list[Issue], list[Issue]]: new = []", "s += \"\".join(generate_issue_text(issue) for issue in changed) s += \"\\n---------------------------------------------------\\n\\n\" s += (", "dataclasses import dataclass from typing import Any, Iterable, Sequence import requests ISSUES_API_URL =", ") if len(new) > 0: s += \"The following issues and pull requests", "issues and is sent each Monday at 0200 CE(S)T to the typing-sig mailing", "sent each Monday at 0200 CE(S)T to the typing-sig mailing list. 
Due to", "\"The following issues and pull requests were updated last week: \\n\\n\" s +=", "split_issues( issues: Iterable[Issue], since: datetime.date ) -> tuple[list[Issue], list[Issue]]: new = [] updated", "+= \"\".join(generate_issue_text(issue) for issue in new) s += \"\\n---------------------------------------------------\\n\\n\" if len(changed) > 0:", "<gh_stars>1000+ #!/usr/bin/env python3 \"\"\" Generate a summary of last week's issues tagged with", "with \"topic: feature\". The summary will include a list of new and changed", "summary of last week's issues tagged with \"topic: feature\". The summary will include", "and is sent each Monday at 0200 CE(S)T to the typing-sig mailing list.", "is sent each Monday at 0200 CE(S)T to the typing-sig mailing list. Due", "on GitHub with the label 'topic: feature'\\n\" \"that were opened or updated last", "SENDER_EMAIL = \"Typing Bot <<EMAIL>>\" RECEIVER_EMAIL = \"<EMAIL>\" @dataclass class Issue: number: int", "'topic: feature'\\n\" \"can be viewed under the following URL:\\n\\n\" ) s += ISSUES_URL", "s = ( \"No issues or pull requests with the label 'topic: feature'", "\"\"\"Return (new, updated) issues.\"\"\" j = requests.get( ISSUES_API_URL, params={ \"labels\": ISSUES_LABEL, \"since\": f\"{since:%Y-%m-%d}T00:00:00Z\",", "new.sort(key=lambda i: i.number) updated.sort(key=lambda i: i.number) return new, updated def print_summary( since: datetime.date,", "if len(changed) > 0: s += \"The following issues and pull requests were", "-> str: s = f\"#{issue.number:<5} \" if issue.pull_request: s += \"[PR] \" s", "last week's issues tagged with \"topic: feature\". The summary will include a list", "Sequence[Issue]) -> str: if len(new) == 0 and len(changed) == 0: s =", ") s += ISSUES_URL return s def generate_issue_text(issue: Issue) -> str: s =", "assert isinstance(j, list) return [parse_issue(j_i) for j_i in j] def parse_issue(j: Any) ->", "i: i.number) return new, updated def print_summary( since: datetime.date, new: Sequence[Issue], changed: Sequence[Issue]", "issues and pull requests in the\\n\" \"typing repository on GitHub with the label", "with the label 'topic: feature'\\n\" \"that were opened or updated last week, excluding", "print() print(generate_mail(new, changed)) def generate_mail(new: Sequence[Issue], changed: Sequence[Issue]) -> str: if len(new) ==", "j assert isinstance(number, int) assert isinstance(title, str) assert isinstance(url, str) assert isinstance(user, str)", "week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for issue in new) s += \"\\n---------------------------------------------------\\n\\n\" if", "in the typing repository on GitHub.\\n\\n\" ) else: s = ( \"The following", "at 0200 CE(S)T to the typing-sig mailing list. Due to limitation with GitHub", "import Any, Iterable, Sequence import requests ISSUES_API_URL = \"https://api.github.com/repos/python/typing/issues\" ISSUES_URL = \"https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22\" ISSUES_LABEL", "Iterable, Sequence import requests ISSUES_API_URL = \"https://api.github.com/repos/python/typing/issues\" ISSUES_URL = \"https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22\" ISSUES_LABEL = \"topic:", "\"\"\" from __future__ import annotations import datetime from dataclasses import dataclass from typing", "GitHub.\\n\\n\" ) else: s = ( \"The following is an overview of all", "user: str pull_request: bool = False def main() -> None: since = previous_week_start()", "@srittau. 
\"\"\" from __future__ import annotations import datetime from dataclasses import dataclass from", "of all issues and pull requests in the\\n\" \"typing repository on GitHub with", "\"<EMAIL>\" @dataclass class Issue: number: int title: str url: str created: datetime.datetime user:", "issue.pull_request: s += \"[PR] \" s += f\"{issue.title}\\n\" s += f\" opened by", "s += \"\".join(generate_issue_text(issue) for issue in new) s += \"\\n---------------------------------------------------\\n\\n\" if len(changed) >", "issues tagged with \"topic: feature\". The summary will include a list of new", "datetime.date: today = datetime.date.today() return today - datetime.timedelta(days=today.weekday() + 7) def fetch_issues(since: datetime.date)", "assert isinstance(number, int) assert isinstance(title, str) assert isinstance(url, str) assert isinstance(user, str) return", "is an overview of all issues and pull requests in the\\n\" \"typing repository", "datetime.timedelta(days=today.weekday() + 7) def fetch_issues(since: datetime.date) -> list[Issue]: \"\"\"Return (new, updated) issues.\"\"\" j", "following issues and pull requests were updated last week: \\n\\n\" s += \"\".join(generate_issue_text(issue)", "issues.\\n\\n\" \"---------------------------------------------------\\n\\n\" ) if len(new) > 0: s += \"The following issues and", "assert isinstance(user, str) return Issue(number, title, url, created_at, user, pull_request) def split_issues( issues:", "in j assert isinstance(number, int) assert isinstance(title, str) assert isinstance(url, str) assert isinstance(user,", "annotations import datetime from dataclasses import dataclass from typing import Any, Iterable, Sequence", "s += \"The following issues and pull requests were opened last week: \\n\\n\"", "limitation with GitHub Actions, the mail is sent from a private server, currently", "the\\n\" \"typing repository on GitHub with the label 'topic: feature'\\n\" \"that were opened", "> 0: s += \"The following issues and pull requests were updated last", "def main() -> None: since = previous_week_start() issues = fetch_issues(since) new, updated =", "-> Issue: number = j[\"number\"] title = j[\"title\"] url = j[\"html_url\"] created_at =", "and pull requests were updated last week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for issue", "\"topic: feature\". The summary will include a list of new and changed issues", "\"can be viewed under the following URL:\\n\\n\" ) s += ISSUES_URL return s", "0 and len(changed) == 0: s = ( \"No issues or pull requests", "Monday at 0200 CE(S)T to the typing-sig mailing list. Due to limitation with", "issue.created.date() >= since: new.append(issue) else: updated.append(issue) new.sort(key=lambda i: i.number) updated.sort(key=lambda i: i.number) return", "def generate_mail(new: Sequence[Issue], changed: Sequence[Issue]) -> str: if len(new) == 0 and len(changed)", "list) return [parse_issue(j_i) for j_i in j] def parse_issue(j: Any) -> Issue: number", "feature'\\n\" \"can be viewed under the following URL:\\n\\n\" ) s += ISSUES_URL return", "each Monday at 0200 CE(S)T to the typing-sig mailing list. 
Due to limitation", "i: i.number) updated.sort(key=lambda i: i.number) return new, updated def print_summary( since: datetime.date, new:", "print(f\"Subject: Opened and changed typing issues week {since:%G-W%V}\") print() print(generate_mail(new, changed)) def generate_mail(new:", "= \"Typing Bot <<EMAIL>>\" RECEIVER_EMAIL = \"<EMAIL>\" @dataclass class Issue: number: int title:", "j[\"number\"] title = j[\"title\"] url = j[\"html_url\"] created_at = datetime.datetime.fromisoformat(j[\"created_at\"][:-1]) user = j[\"user\"][\"login\"]", "from dataclasses import dataclass from typing import Any, Iterable, Sequence import requests ISSUES_API_URL", "to limitation with GitHub Actions, the mail is sent from a private server,", "pull requests with the label 'topic: feature'\\n\" \"can be viewed under the following", "opened by @{issue.user}\\n\" s += f\" {issue.url}\\n\" return s if __name__ == \"__main__\":", "new.append(issue) else: updated.append(issue) new.sort(key=lambda i: i.number) updated.sort(key=lambda i: i.number) return new, updated def", "isinstance(title, str) assert isinstance(url, str) assert isinstance(user, str) return Issue(number, title, url, created_at,", "\"The following issues and pull requests were opened last week: \\n\\n\" s +=", "updated last week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for issue in changed) s +=", "be viewed under the following URL:\\n\\n\" ) s += ISSUES_URL return s def", "return today - datetime.timedelta(days=today.weekday() + 7) def fetch_issues(since: datetime.date) -> list[Issue]: \"\"\"Return (new,", "\"\".join(generate_issue_text(issue) for issue in new) s += \"\\n---------------------------------------------------\\n\\n\" if len(changed) > 0: s", "+= \"\\n---------------------------------------------------\\n\\n\" s += ( \"All issues and pull requests with the label", "s += f\"{issue.title}\\n\" s += f\" opened by @{issue.user}\\n\" s += f\" {issue.url}\\n\"", "isinstance(user, str) return Issue(number, title, url, created_at, user, pull_request) def split_issues( issues: Iterable[Issue],", "ISSUES_API_URL = \"https://api.github.com/repos/python/typing/issues\" ISSUES_URL = \"https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22\" ISSUES_LABEL = \"topic: feature\" SENDER_EMAIL = \"Typing", "str pull_request: bool = False def main() -> None: since = previous_week_start() issues", "\"\".join(generate_issue_text(issue) for issue in changed) s += \"\\n---------------------------------------------------\\n\\n\" s += ( \"All issues", "len(new) == 0 and len(changed) == 0: s = ( \"No issues or", "url = j[\"html_url\"] created_at = datetime.datetime.fromisoformat(j[\"created_at\"][:-1]) user = j[\"user\"][\"login\"] pull_request = \"pull_request\" in", "today - datetime.timedelta(days=today.weekday() + 7) def fetch_issues(since: datetime.date) -> list[Issue]: \"\"\"Return (new, updated)", "were opened or updated last week, excluding closed issues.\\n\\n\" \"---------------------------------------------------\\n\\n\" ) if len(new)", "== 0 and len(changed) == 0: s = ( \"No issues or pull", "label 'topic: feature'\\n\" \"can be viewed under the following URL:\\n\\n\" ) s +=", "-> datetime.date: today = datetime.date.today() return today - datetime.timedelta(days=today.weekday() + 7) def fetch_issues(since:", "= j[\"title\"] url = j[\"html_url\"] created_at = datetime.datetime.fromisoformat(j[\"created_at\"][:-1]) user = j[\"user\"][\"login\"] pull_request =", "opened\\n\" \"or updated last week in the typing repository on 
GitHub.\\n\\n\" ) else:", "datetime from dataclasses import dataclass from typing import Any, Iterable, Sequence import requests", "split_issues(issues, since) print_summary(since, new, updated) def previous_week_start() -> datetime.date: today = datetime.date.today() return", "s += ( \"All issues and pull requests with the label 'topic: feature'\\n\"", "\"\\n---------------------------------------------------\\n\\n\" s += ( \"All issues and pull requests with the label 'topic:", "from typing import Any, Iterable, Sequence import requests ISSUES_API_URL = \"https://api.github.com/repos/python/typing/issues\" ISSUES_URL =", "parse_issue(j: Any) -> Issue: number = j[\"number\"] title = j[\"title\"] url = j[\"html_url\"]", "repository on GitHub.\\n\\n\" ) else: s = ( \"The following is an overview", "week in the typing repository on GitHub.\\n\\n\" ) else: s = ( \"The", "Bot <<EMAIL>>\" RECEIVER_EMAIL = \"<EMAIL>\" @dataclass class Issue: number: int title: str url:", "previous_week_start() -> datetime.date: today = datetime.date.today() return today - datetime.timedelta(days=today.weekday() + 7) def", "s += \"The following issues and pull requests were updated last week: \\n\\n\"", "{SENDER_EMAIL}\") print(f\"To: {RECEIVER_EMAIL}\") print(f\"Subject: Opened and changed typing issues week {since:%G-W%V}\") print() print(generate_mail(new,", "were opened last week: \\n\\n\" s += \"\".join(generate_issue_text(issue) for issue in new) s", "new = [] updated = [] for issue in issues: if issue.created.date() >=", "ISSUES_URL = \"https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22\" ISSUES_LABEL = \"topic: feature\" SENDER_EMAIL = \"Typing Bot <<EMAIL>>\" RECEIVER_EMAIL", "Issue: number: int title: str url: str created: datetime.datetime user: str pull_request: bool", "False def main() -> None: since = previous_week_start() issues = fetch_issues(since) new, updated", "from a private server, currently maintained by @srittau. \"\"\" from __future__ import annotations" ]
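# A quick sketch of the date arithmetic and ISO-week subject line used above;
# the concrete dates below are illustrative assumptions, not taken from the
# script itself.
import datetime

today = datetime.date(2021, 3, 15)                     # a Monday
since = today - datetime.timedelta(days=today.weekday() + 7)
assert since == datetime.date(2021, 3, 8)              # Monday of the previous week
print(f"Opened and changed typing issues week {since:%G-W%V}")
# prints: Opened and changed typing issues week 2021-W10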
[ "while not tag: tag = self.write_no_block(8, text) return tag def write_no_block(self, sector, text):", "0 for i in range(0, 5): n = n * 256 + uid[i]", "TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL) if status != self.READER.MI_OK: return None (status, uid) = self.READER.MFRC522_Anticoll()", "in text) self.READER.MFRC522_StopCrypto1() return self.TAG def write(self, sector, text): tag = self.write_no_block(8, text)", "= bytearray() data.extend(text.ljust(16)) self.READER.MFRC522_Write(8, data) text = self.READER.MFRC522_Read(8) if text: self.TAG['text'] = ''.join(chr(i)", "text) while not tag: tag = self.write_no_block(8, text) return tag def write_no_block(self, sector,", "class SimpleMFRC522: READER = None; TAG = { 'id' : None, 'text' :", "'id' : None, 'text' : ''}; KEY = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF] def __init__(self): self.READER =", "= 0 for i in range(0, 5): n = n * 256 +", "status == self.READER.MI_OK: text = self.READER.MFRC522_Read(8) if text: self.TAG['text'] = ''.join(chr(i) for i", "data.extend(text.ljust(16)) self.READER.MFRC522_Write(8, data) text = self.READER.MFRC522_Read(8) if text: self.TAG['text'] = ''.join(chr(i) for i", "data) text = self.READER.MFRC522_Read(8) if text: self.TAG['text'] = ''.join(chr(i) for i in text)", "if status != self.READER.MI_OK: return None (status, uid) = self.READER.MFRC522_Anticoll() if status !=", "sector, text): (status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL) if status != self.READER.MI_OK: return None (status,", "self.READER.MI_OK: data = bytearray() data.extend(text.ljust(16)) self.READER.MFRC522_Write(8, data) text = self.READER.MFRC522_Read(8) if text: self.TAG['text']", "self.TAG['text'] = ''.join(chr(i) for i in text) self.READER.MFRC522_StopCrypto1() return self.TAG def write(self, sector,", "read(self): tag = self.read_no_block() while not tag: tag = self.read_no_block() return tag def", "text: self.TAG['text'] = ''.join(chr(i) for i in text) self.READER.MFRC522_StopCrypto1() return self.TAG def write(self,", "text) self.READER.MFRC522_StopCrypto1() return self.TAG def write(self, sector, text): tag = self.write_no_block(8, text) while", "= None; TAG = { 'id' : None, 'text' : ''}; KEY =", "== self.READER.MI_OK: text = self.READER.MFRC522_Read(8) if text: self.TAG['text'] = ''.join(chr(i) for i in", "uid) self.READER.MFRC522_Read(8) if status == self.READER.MI_OK: data = bytearray() data.extend(text.ljust(16)) self.READER.MFRC522_Write(8, data) text", "def write(self, sector, text): tag = self.write_no_block(8, text) while not tag: tag =", "not tag: tag = self.write_no_block(8, text) return tag def write_no_block(self, sector, text): (status,", "self.READER.MFRC522_StopCrypto1() return self.TAG def write(self, sector, text): tag = self.write_no_block(8, text) while not", "uid) if status == self.READER.MI_OK: text = self.READER.MFRC522_Read(8) if text: self.TAG['text'] = ''.join(chr(i)", "if status == self.READER.MI_OK: text = self.READER.MFRC522_Read(8) if text: self.TAG['text'] = ''.join(chr(i) for", "status = self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY, uid) self.READER.MFRC522_Read(8) if status == self.READER.MI_OK: data =", "''.join(chr(i) for i in text) self.READER.MFRC522_StopCrypto1() return self.TAG def uid_to_num(self, uid): n =", "text) return tag def write_no_block(self, sector, text): (status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL) if status", "8, self.KEY, uid) self.READER.MFRC522_Read(8) if status == self.READER.MI_OK: data = 
bytearray() data.extend(text.ljust(16)) self.READER.MFRC522_Write(8,", "None (status, uid) = self.READER.MFRC522_Anticoll() if status != self.READER.MI_OK: return None self.TAG['id'] =", "return self.TAG def uid_to_num(self, uid): n = 0 for i in range(0, 5):", "(status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL) if status != self.READER.MI_OK: return None (status, uid) =", "= self.write_no_block(8, text) while not tag: tag = self.write_no_block(8, text) return tag def", "= ''.join(chr(i) for i in text) self.READER.MFRC522_StopCrypto1() return self.TAG def write(self, sector, text):", "def read(self): tag = self.read_no_block() while not tag: tag = self.read_no_block() return tag", "return None self.TAG['id'] = self.uid_to_num(uid) self.READER.MFRC522_SelectTag(uid) status = self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY, uid) self.READER.MFRC522_Read(8)", "tag = self.write_no_block(8, text) return tag def write_no_block(self, sector, text): (status, TagType) =", "uid) = self.READER.MFRC522_Anticoll() if status != self.READER.MI_OK: return None self.TAG['id'] = self.uid_to_num(uid) self.READER.MFRC522_SelectTag(uid)", "self.READER.MI_OK: return None (status, uid) = self.READER.MFRC522_Anticoll() if status != self.READER.MI_OK: return None", "self.READER.MFRC522_Write(8, data) text = self.READER.MFRC522_Read(8) if text: self.TAG['text'] = ''.join(chr(i) for i in", "def __init__(self): self.READER = MFRC522.MFRC522() def read(self): tag = self.read_no_block() while not tag:", "def uid_to_num(self, uid): n = 0 for i in range(0, 5): n =", "MFRC522 import RPi.GPIO as GPIO class SimpleMFRC522: READER = None; TAG = {", "self.READER.MFRC522_Anticoll() if status != self.READER.MI_OK: return None self.TAG['id'] = self.uid_to_num(uid) self.READER.MFRC522_SelectTag(uid) status =", "return None (status, uid) = self.READER.MFRC522_Anticoll() if status != self.READER.MI_OK: return None self.TAG['id']", "self.read_no_block() while not tag: tag = self.read_no_block() return tag def read_no_block(self): (status, TagType)", "def read_no_block(self): (status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL) if status != self.READER.MI_OK: return None (status,", "for i in text) self.READER.MFRC522_StopCrypto1() return self.TAG def uid_to_num(self, uid): n = 0", "if status == self.READER.MI_OK: data = bytearray() data.extend(text.ljust(16)) self.READER.MFRC522_Write(8, data) text = self.READER.MFRC522_Read(8)", "return tag def read_no_block(self): (status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL) if status != self.READER.MI_OK: return", "self.READER.MFRC522_SelectTag(uid) status = self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY, uid) self.READER.MFRC522_Read(8) if status == self.READER.MI_OK: data", "tag def write_no_block(self, sector, text): (status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL) if status != self.READER.MI_OK:", "tag def read_no_block(self): (status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL) if status != self.READER.MI_OK: return None", "self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY, uid) if status == self.READER.MI_OK: text = self.READER.MFRC522_Read(8) if text:", "for i in text) self.READER.MFRC522_StopCrypto1() return self.TAG def write(self, sector, text): tag =", "self.write_no_block(8, text) return tag def write_no_block(self, sector, text): (status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL) if", "import MFRC522 
import RPi.GPIO as GPIO class SimpleMFRC522: READER = None; TAG =", "tag: tag = self.write_no_block(8, text) return tag def write_no_block(self, sector, text): (status, TagType)", "READER = None; TAG = { 'id' : None, 'text' : ''}; KEY", "if text: self.TAG['text'] = ''.join(chr(i) for i in text) self.READER.MFRC522_StopCrypto1() return self.TAG def", "None self.TAG['id'] = self.uid_to_num(uid) self.READER.MFRC522_SelectTag(uid) status = self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY, uid) if status", "= self.READER.MFRC522_Read(8) if text: self.TAG['text'] = ''.join(chr(i) for i in text) self.READER.MFRC522_StopCrypto1() return", "i in text) self.READER.MFRC522_StopCrypto1() return self.TAG def uid_to_num(self, uid): n = 0 for", "return tag def write_no_block(self, sector, text): (status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL) if status !=", "!= self.READER.MI_OK: return None self.TAG['id'] = self.uid_to_num(uid) self.READER.MFRC522_SelectTag(uid) status = self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY,", "= self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY, uid) self.READER.MFRC522_Read(8) if status == self.READER.MI_OK: data = bytearray()", "i in range(0, 5): n = n * 256 + uid[i] return n", "self.TAG['text'] = ''.join(chr(i) for i in text) self.READER.MFRC522_StopCrypto1() return self.TAG def uid_to_num(self, uid):", "tag = self.write_no_block(8, text) while not tag: tag = self.write_no_block(8, text) return tag", "return None self.TAG['id'] = self.uid_to_num(uid) self.READER.MFRC522_SelectTag(uid) status = self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY, uid) if", "uid): n = 0 for i in range(0, 5): n = n *", "status = self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY, uid) if status == self.READER.MI_OK: text = self.READER.MFRC522_Read(8)", "self.uid_to_num(uid) self.READER.MFRC522_SelectTag(uid) status = self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY, uid) self.READER.MFRC522_Read(8) if status == self.READER.MI_OK:", "self.READER = MFRC522.MFRC522() def read(self): tag = self.read_no_block() while not tag: tag =", "write(self, sector, text): tag = self.write_no_block(8, text) while not tag: tag = self.write_no_block(8,", "self.READER.MFRC522_Read(8) if status == self.READER.MI_OK: data = bytearray() data.extend(text.ljust(16)) self.READER.MFRC522_Write(8, data) text =", "data = bytearray() data.extend(text.ljust(16)) self.READER.MFRC522_Write(8, data) text = self.READER.MFRC522_Read(8) if text: self.TAG['text'] =", "write_no_block(self, sector, text): (status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL) if status != self.READER.MI_OK: return None", "n = 0 for i in range(0, 5): n = n * 256", "self.KEY, uid) self.READER.MFRC522_Read(8) if status == self.READER.MI_OK: data = bytearray() data.extend(text.ljust(16)) self.READER.MFRC522_Write(8, data)", "bytearray() data.extend(text.ljust(16)) self.READER.MFRC522_Write(8, data) text = self.READER.MFRC522_Read(8) if text: self.TAG['text'] = ''.join(chr(i) for", "self.READER.MFRC522_Request(self.READER.PICC_REQIDL) if status != self.READER.MI_OK: return None (status, uid) = self.READER.MFRC522_Anticoll() if status", "''.join(chr(i) for i in text) self.READER.MFRC522_StopCrypto1() return self.TAG def write(self, sector, text): tag", "GPIO class SimpleMFRC522: READER = None; TAG = { 'id' : None, 'text'", "= self.read_no_block() return tag def read_no_block(self): (status, TagType) = 
SimpleMFRC522 — a thin wrapper around the MFRC522 RFID driver that reads and writes block 8 of a MIFARE tag:

import MFRC522
import RPi.GPIO as GPIO


class SimpleMFRC522:
    READER = None
    TAG = {'id': None, 'text': ''}
    KEY = [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]

    def __init__(self):
        self.READER = MFRC522.MFRC522()

    def read(self):
        # Spin until a tag is successfully read.
        tag = self.read_no_block()
        while not tag:
            tag = self.read_no_block()
        return tag

    def read_no_block(self):
        (status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL)
        if status != self.READER.MI_OK:
            return None
        (status, uid) = self.READER.MFRC522_Anticoll()
        if status != self.READER.MI_OK:
            return None
        self.TAG['id'] = self.uid_to_num(uid)
        self.READER.MFRC522_SelectTag(uid)
        status = self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY, uid)
        if status == self.READER.MI_OK:
            text = self.READER.MFRC522_Read(8)
            if text:
                self.TAG['text'] = ''.join(chr(i) for i in text)
        self.READER.MFRC522_StopCrypto1()
        return self.TAG

    def write(self, sector, text):
        # Spin until a tag is successfully written (block 8 is used regardless of `sector`).
        tag = self.write_no_block(8, text)
        while not tag:
            tag = self.write_no_block(8, text)
        return tag

    def write_no_block(self, sector, text):
        (status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL)
        if status != self.READER.MI_OK:
            return None
        (status, uid) = self.READER.MFRC522_Anticoll()
        if status != self.READER.MI_OK:
            return None
        self.TAG['id'] = self.uid_to_num(uid)
        self.READER.MFRC522_SelectTag(uid)
        status = self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY, uid)
        self.READER.MFRC522_Read(8)
        if status == self.READER.MI_OK:
            data = bytearray()
            data.extend(text.ljust(16))
            self.READER.MFRC522_Write(8, data)
            text = self.READER.MFRC522_Read(8)
            if text:
                # The fragment breaks off here; the tail mirrors read_no_block.
                self.TAG['text'] = ''.join(chr(i) for i in text)
        self.READER.MFRC522_StopCrypto1()
        return self.TAG

    def uid_to_num(self, uid):
        # Fold the 5-byte UID into a single integer.
        n = 0
        for i in range(0, 5):
            n = n * 256 + uid[i]
        return n
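A minimal usage sketch for the class above, assuming an MFRC522 board wired over SPI and the MFRC522 driver module on the path; the payload is passed as bytes so the ljust/extend pair in write_no_block behaves the same under Python 3. The text written is illustrative only.

# Usage sketch (hardware and payload are assumptions, not part of the original file).
import RPi.GPIO as GPIO

reader = SimpleMFRC522()
try:
    tag = reader.read()                      # blocks until a tag is presented
    print("id:", tag['id'], "text:", tag['text'])
    reader.write(8, b"hello tag")            # sector argument is ignored; block 8 is written
finally:
    GPIO.cleanup()                           # release the GPIO pins claimed by the reader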
[ "download_projects(projects, output_dir, full_fetch=False): with ThreadPoolExecutor() as executor: executor.map(lambda p: download_project(p, output_dir, full_fetch=full_fetch), projects)", "open(input_file, \"r\") as f: return [Project(project) for project in json.load(f)] def download_projects_command(args): projects", "[project.clone_url, output_dir] subprocess.run(command) def download_project(project, output_base_dir, full_fetch=False): try: output_dir = path.join(output_base_dir, project.full_name) if", "try: output_dir = path.join(output_base_dir, project.full_name) if path.isdir(output_dir): logging.info(\"%s already exists\", project.full_name) return False", "output_dir, full_fetch=full_fetch) return True except Exception as e: # pylint: disable=broad-except logging.warning(\"could not", "logging.warning(\"could not download %s: %s\", project.full_name, e) def download_projects(projects, output_dir, full_fetch=False): with ThreadPoolExecutor()", "if path.isdir(output_dir): logging.info(\"%s already exists\", project.full_name) return False download_git_project(project, output_dir, full_fetch=full_fetch) return True", "import Project def download_git_project(project, output_dir, full_fetch=False): command = [\"git\", \"clone\"] if not full_fetch:", "download %s: %s\", project.full_name, e) def download_projects(projects, output_dir, full_fetch=False): with ThreadPoolExecutor() as executor:", "path.join(output_base_dir, project.full_name) if path.isdir(output_dir): logging.info(\"%s already exists\", project.full_name) return False download_git_project(project, output_dir, full_fetch=full_fetch)", "already exists\", project.full_name) return False download_git_project(project, output_dir, full_fetch=full_fetch) return True except Exception as", "project.full_name) return False download_git_project(project, output_dir, full_fetch=full_fetch) return True except Exception as e: #", "False download_git_project(project, output_dir, full_fetch=full_fetch) return True except Exception as e: # pylint: disable=broad-except", "output_dir, full_fetch=False): with ThreadPoolExecutor() as executor: executor.map(lambda p: download_project(p, output_dir, full_fetch=full_fetch), projects) def", "output_dir = path.join(output_base_dir, project.full_name) if path.isdir(output_dir): logging.info(\"%s already exists\", project.full_name) return False download_git_project(project,", "bigcode_fetcher.project import Project def download_git_project(project, output_dir, full_fetch=False): command = [\"git\", \"clone\"] if not", "logging.info(\"%s already exists\", project.full_name) return False download_git_project(project, output_dir, full_fetch=full_fetch) return True except Exception", "command = [\"git\", \"clone\"] if not full_fetch: command += [\"--depth\", \"1\"] command +=", "except Exception as e: # pylint: disable=broad-except logging.warning(\"could not download %s: %s\", project.full_name,", "import logging import json from bigcode_fetcher.project import Project def download_git_project(project, output_dir, full_fetch=False): command", "pylint: disable=broad-except logging.warning(\"could not download %s: %s\", project.full_name, e) def download_projects(projects, output_dir, full_fetch=False):", "p: download_project(p, output_dir, full_fetch=full_fetch), projects) def load_projects_from_file(input_file): with open(input_file, \"r\") as f: return", "disable=broad-except logging.warning(\"could not download %s: %s\", project.full_name, e) def download_projects(projects, output_dir, 
full_fetch=False): with", "import json from bigcode_fetcher.project import Project def download_git_project(project, output_dir, full_fetch=False): command = [\"git\",", "as e: # pylint: disable=broad-except logging.warning(\"could not download %s: %s\", project.full_name, e) def", "path from concurrent.futures import ThreadPoolExecutor import subprocess import logging import json from bigcode_fetcher.project", "concurrent.futures import ThreadPoolExecutor import subprocess import logging import json from bigcode_fetcher.project import Project", "subprocess.run(command) def download_project(project, output_base_dir, full_fetch=False): try: output_dir = path.join(output_base_dir, project.full_name) if path.isdir(output_dir): logging.info(\"%s", "download_git_project(project, output_dir, full_fetch=full_fetch) return True except Exception as e: # pylint: disable=broad-except logging.warning(\"could", "# pylint: disable=broad-except logging.warning(\"could not download %s: %s\", project.full_name, e) def download_projects(projects, output_dir,", "executor: executor.map(lambda p: download_project(p, output_dir, full_fetch=full_fetch), projects) def load_projects_from_file(input_file): with open(input_file, \"r\") as", "exists\", project.full_name) return False download_git_project(project, output_dir, full_fetch=full_fetch) return True except Exception as e:", "%s\", project.full_name, e) def download_projects(projects, output_dir, full_fetch=False): with ThreadPoolExecutor() as executor: executor.map(lambda p:", "full_fetch=full_fetch), projects) def load_projects_from_file(input_file): with open(input_file, \"r\") as f: return [Project(project) for project", "os.path as path from concurrent.futures import ThreadPoolExecutor import subprocess import logging import json", "download_git_project(project, output_dir, full_fetch=False): command = [\"git\", \"clone\"] if not full_fetch: command += [\"--depth\",", "def load_projects_from_file(input_file): with open(input_file, \"r\") as f: return [Project(project) for project in json.load(f)]", "\"r\") as f: return [Project(project) for project in json.load(f)] def download_projects_command(args): projects =", "as executor: executor.map(lambda p: download_project(p, output_dir, full_fetch=full_fetch), projects) def load_projects_from_file(input_file): with open(input_file, \"r\")", "e) def download_projects(projects, output_dir, full_fetch=False): with ThreadPoolExecutor() as executor: executor.map(lambda p: download_project(p, output_dir,", "\"clone\"] if not full_fetch: command += [\"--depth\", \"1\"] command += [project.clone_url, output_dir] subprocess.run(command)", "projects) def load_projects_from_file(input_file): with open(input_file, \"r\") as f: return [Project(project) for project in", "+= [project.clone_url, output_dir] subprocess.run(command) def download_project(project, output_base_dir, full_fetch=False): try: output_dir = path.join(output_base_dir, project.full_name)", "= path.join(output_base_dir, project.full_name) if path.isdir(output_dir): logging.info(\"%s already exists\", project.full_name) return False download_git_project(project, output_dir,", "%s: %s\", project.full_name, e) def download_projects(projects, output_dir, full_fetch=False): with ThreadPoolExecutor() as executor: executor.map(lambda", "logging import json from bigcode_fetcher.project import Project def download_git_project(project, output_dir, full_fetch=False): command =", "ThreadPoolExecutor() as executor: executor.map(lambda p: download_project(p, 
output_dir, full_fetch=full_fetch), projects) def load_projects_from_file(input_file): with open(input_file,", "full_fetch=False): with ThreadPoolExecutor() as executor: executor.map(lambda p: download_project(p, output_dir, full_fetch=full_fetch), projects) def load_projects_from_file(input_file):", "+= [\"--depth\", \"1\"] command += [project.clone_url, output_dir] subprocess.run(command) def download_project(project, output_base_dir, full_fetch=False): try:", "def download_projects(projects, output_dir, full_fetch=False): with ThreadPoolExecutor() as executor: executor.map(lambda p: download_project(p, output_dir, full_fetch=full_fetch),", "path.isdir(output_dir): logging.info(\"%s already exists\", project.full_name) return False download_git_project(project, output_dir, full_fetch=full_fetch) return True except", "full_fetch=False): try: output_dir = path.join(output_base_dir, project.full_name) if path.isdir(output_dir): logging.info(\"%s already exists\", project.full_name) return", "f: return [Project(project) for project in json.load(f)] def download_projects_command(args): projects = load_projects_from_file(args.input_file) download_projects(projects,", "full_fetch: command += [\"--depth\", \"1\"] command += [project.clone_url, output_dir] subprocess.run(command) def download_project(project, output_base_dir,", "from concurrent.futures import ThreadPoolExecutor import subprocess import logging import json from bigcode_fetcher.project import", "<gh_stars>1-10 import os.path as path from concurrent.futures import ThreadPoolExecutor import subprocess import logging", "output_dir] subprocess.run(command) def download_project(project, output_base_dir, full_fetch=False): try: output_dir = path.join(output_base_dir, project.full_name) if path.isdir(output_dir):", "output_dir, full_fetch=full_fetch), projects) def load_projects_from_file(input_file): with open(input_file, \"r\") as f: return [Project(project) for", "Exception as e: # pylint: disable=broad-except logging.warning(\"could not download %s: %s\", project.full_name, e)", "return False download_git_project(project, output_dir, full_fetch=full_fetch) return True except Exception as e: # pylint:", "def download_project(project, output_base_dir, full_fetch=False): try: output_dir = path.join(output_base_dir, project.full_name) if path.isdir(output_dir): logging.info(\"%s already", "with ThreadPoolExecutor() as executor: executor.map(lambda p: download_project(p, output_dir, full_fetch=full_fetch), projects) def load_projects_from_file(input_file): with", "from bigcode_fetcher.project import Project def download_git_project(project, output_dir, full_fetch=False): command = [\"git\", \"clone\"] if", "import subprocess import logging import json from bigcode_fetcher.project import Project def download_git_project(project, output_dir,", "as f: return [Project(project) for project in json.load(f)] def download_projects_command(args): projects = load_projects_from_file(args.input_file)", "full_fetch=False): command = [\"git\", \"clone\"] if not full_fetch: command += [\"--depth\", \"1\"] command", "download_project(project, output_base_dir, full_fetch=False): try: output_dir = path.join(output_base_dir, project.full_name) if path.isdir(output_dir): logging.info(\"%s already exists\",", "as path from concurrent.futures import ThreadPoolExecutor import subprocess import logging import json from", "download_project(p, output_dir, full_fetch=full_fetch), projects) def load_projects_from_file(input_file): with open(input_file, \"r\") as f: 
return [Project(project)", "= [\"git\", \"clone\"] if not full_fetch: command += [\"--depth\", \"1\"] command += [project.clone_url,", "output_base_dir, full_fetch=False): try: output_dir = path.join(output_base_dir, project.full_name) if path.isdir(output_dir): logging.info(\"%s already exists\", project.full_name)", "import os.path as path from concurrent.futures import ThreadPoolExecutor import subprocess import logging import", "[\"--depth\", \"1\"] command += [project.clone_url, output_dir] subprocess.run(command) def download_project(project, output_base_dir, full_fetch=False): try: output_dir", "json from bigcode_fetcher.project import Project def download_git_project(project, output_dir, full_fetch=False): command = [\"git\", \"clone\"]", "project.full_name, e) def download_projects(projects, output_dir, full_fetch=False): with ThreadPoolExecutor() as executor: executor.map(lambda p: download_project(p,", "ThreadPoolExecutor import subprocess import logging import json from bigcode_fetcher.project import Project def download_git_project(project,", "import ThreadPoolExecutor import subprocess import logging import json from bigcode_fetcher.project import Project def", "return True except Exception as e: # pylint: disable=broad-except logging.warning(\"could not download %s:", "Project def download_git_project(project, output_dir, full_fetch=False): command = [\"git\", \"clone\"] if not full_fetch: command", "output_dir, full_fetch=False): command = [\"git\", \"clone\"] if not full_fetch: command += [\"--depth\", \"1\"]", "def download_git_project(project, output_dir, full_fetch=False): command = [\"git\", \"clone\"] if not full_fetch: command +=", "[\"git\", \"clone\"] if not full_fetch: command += [\"--depth\", \"1\"] command += [project.clone_url, output_dir]", "full_fetch=full_fetch) return True except Exception as e: # pylint: disable=broad-except logging.warning(\"could not download", "True except Exception as e: # pylint: disable=broad-except logging.warning(\"could not download %s: %s\",", "if not full_fetch: command += [\"--depth\", \"1\"] command += [project.clone_url, output_dir] subprocess.run(command) def", "\"1\"] command += [project.clone_url, output_dir] subprocess.run(command) def download_project(project, output_base_dir, full_fetch=False): try: output_dir =", "subprocess import logging import json from bigcode_fetcher.project import Project def download_git_project(project, output_dir, full_fetch=False):", "command += [project.clone_url, output_dir] subprocess.run(command) def download_project(project, output_base_dir, full_fetch=False): try: output_dir = path.join(output_base_dir,", "project.full_name) if path.isdir(output_dir): logging.info(\"%s already exists\", project.full_name) return False download_git_project(project, output_dir, full_fetch=full_fetch) return", "executor.map(lambda p: download_project(p, output_dir, full_fetch=full_fetch), projects) def load_projects_from_file(input_file): with open(input_file, \"r\") as f:", "load_projects_from_file(input_file): with open(input_file, \"r\") as f: return [Project(project) for project in json.load(f)] def", "with open(input_file, \"r\") as f: return [Project(project) for project in json.load(f)] def download_projects_command(args):", "command += [\"--depth\", \"1\"] command += [project.clone_url, output_dir] subprocess.run(command) def download_project(project, output_base_dir, full_fetch=False):", "not download %s: %s\", project.full_name, e) def download_projects(projects, output_dir, full_fetch=False): 
with ThreadPoolExecutor() as", "not full_fetch: command += [\"--depth\", \"1\"] command += [project.clone_url, output_dir] subprocess.run(command) def download_project(project,", "return [Project(project) for project in json.load(f)] def download_projects_command(args): projects = load_projects_from_file(args.input_file) download_projects(projects, args.output_dir)", "e: # pylint: disable=broad-except logging.warning(\"could not download %s: %s\", project.full_name, e) def download_projects(projects," ]
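A hedged driver for the functions above, not part of the original module: it writes a one-entry projects.json and runs the downloader directly. The JSON field names mirror the attributes the code reads off Project (full_name, clone_url); that Project accepts a plain dict is inferred only from how load_projects_from_file constructs it, and the file name, repository, and output directory are illustrative.

# Hypothetical driver (names and paths are illustrative assumptions).
import json

sample = [{"full_name": "octocat/Hello-World",
           "clone_url": "https://github.com/octocat/Hello-World.git"}]

with open("projects.json", "w") as f:
    json.dump(sample, f)

projects = load_projects_from_file("projects.json")
download_projects(projects, "/tmp/bigcode-repos")   # shallow clones (depth 1) by default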
[ "import glob import json import logging as lg from pathlib import Path from", "filename='log.txt', filemode='a+' template_loader = FileSystemLoader('./templates') template_env = Environment(loader=template_loader) TEMPLATE = \"template.html\" template =", "dpi=1200, bbox_inches='tight') #plt.show() # Make index.html # accquire latest statistics covid_daily_reports_path = os.path.join(os.environ['GITHUB_WORKSPACE'],", "Make index.html # accquire latest statistics covid_daily_reports_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_daily_reports') jhu_stats", "live_cases.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['cases'] if date_today > live_recoveries_latest_date: print(mohfw_stats['in_stats']['recovered'], int(live_recoveries.iloc[:,-1:].iloc[0])) if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]):", "the Disease'], 'fakes': resources['Fads, Fake News & Scams'], 'misc': resources['Miscellaneous'], 'commit_info': commit_info_dict, 'state_info':", "if mohfw_stats['in_stats']['cases'] > jhu_stats['in_stats']['cases']: in_cases_greater = mohfw_stats['in_stats']['cases'] else: in_cases_greater = jhu_stats['in_stats']['cases'] if mohfw_stats['in_stats']['deaths']", "transform=trans, ha=\"right\", va=\"center\") ax.text(0.01, cases_max, cases_max, color=\"red\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") ax.text(0.01, deaths_max, deaths_max,", "%I:%M:%S %p\")#, filename='log.txt', filemode='a+' template_loader = FileSystemLoader('./templates') template_env = Environment(loader=template_loader) TEMPLATE = \"template.html\"", "= os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_time_series') cases_path = os.path.join(covid_data_path, 'time_series_covid19_confirmed_global.csv') recoveries_path = os.path.join(covid_data_path, 'time_series_covid19_recovered_global.csv')", "elif date_today == live_recoveries_latest_date: if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]): live_recoveries.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['recovered'] if date_today", "sns import matplotlib.pyplot as plt from matplotlib.dates import date2num, DateFormatter import matplotlib.transforms as", "table_df.to_csv(f'./datasets/statewise_distribution/{str(date.today())}.csv', sep=',', encoding='utf-8', index=False) else: lg.warning(\"Failed to write statewise distribution file. 
Map will", "x: datetime.strptime(x, '%m/%d/%y')) jhu_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) # Make plot ax = plt.axes()", "palette={'cases': 'Red', 'recoveries': 'Green', 'deaths': 'Gray'}, dashes=False, data=plot_df, markers={'deaths': 'X', 'recoveries': 'd', 'cases':", "== live_recoveries_latest_date: if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]): live_recoveries.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['recovered'] if date_today > live_deaths_latest_date:", "\"%m/%d/%y\").date() live_recoveries_latest_date = datetime.strptime(live_recoveries_latest_date, \"%m/%d/%y\").date() live_deaths_latest_date = datetime.strptime(live_deaths_latest_date, \"%m/%d/%y\").date() print(live_cases_latest_date, live_recoveries_latest_date, live_deaths_latest_date) if", "= FileSystemLoader('./templates') template_env = Environment(loader=template_loader) TEMPLATE = \"template.html\" template = template_env.get_template(TEMPLATE) sns.set(style=\"ticks\") sns.set_context(\"paper\",", "= transforms.blended_transform_factory(ax.get_yticklabels()[0].get_transform(), ax.transData) #ax.text(0, cases_max, color=\"red\", s=cases_max, transform=trans, ha=\"right\", va=\"center\") #ax.text(0, deaths_max, color=\"red\",", "stats_dict, 'safety_resources': resources['SAFETY & PREVENTION'], 'about': resources['Virus & the Disease'], 'fakes': resources['Fads, Fake", "import matplotlib.pyplot as plt from matplotlib.dates import date2num, DateFormatter import matplotlib.transforms as transforms", "abs(xt[-1] - last_x_tick) < (xt[1] - xt[0])/2: xt.pop(-1) #xt = np.append(xt, last_x_tick) xt.append(last_x_tick)", "1.5,'lines.markersize':3})#paper,talk,notebook fig, ax = plt.subplots() covid_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_time_series') cases_path =", "if date_today > live_cases_latest_date: if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]): print(mohfw_stats['in_stats']['cases'], int(live_cases.iloc[:,-1:].iloc[0])) live_cases[date_today_str] = mohfw_stats['in_stats']['cases']#", "if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]): live_cases.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['cases'] if date_today > live_recoveries_latest_date: print(mohfw_stats['in_stats']['recovered'], int(live_recoveries.iloc[:,-1:].iloc[0]))", "live_recoveries_latest_date: if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]): live_recoveries.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['recovered'] if date_today > live_deaths_latest_date: if", "in_deaths_df, in_recoveries_df) final_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) ## Using data that is larger live_cases", "yaml import requests import pandas as pd import numpy as np import seaborn", "= mohfw_stats['in_stats']['cases']# new column in live with mohfw value elif date_today == live_cases_latest_date:", "value elif date_today == live_cases_latest_date: if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]): live_cases.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['cases'] if", "lg.warning(\"Failed to write statewise distribution file. 
Map will use old file even though", "ls='dotted', linewidth=0.5) #'-', '--', '-.', ':', 'None', ' ', '', 'solid', 'dashed', 'dashdot',", "bbox_inches='tight') #plt.show() # Make index.html # accquire latest statistics covid_daily_reports_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data',", "deaths_max = int(plot_df['value'].where(plot_df['category'] == 'deaths').max()) recoveries_max = int(plot_df['value'].where(plot_df['category'] == 'recoveries').max()) ax.axhline(cases_max, ls='dotted', linewidth=0.5)", "new column in live with mohfw value elif date_today == live_cases_latest_date: if mohfw_stats['in_stats']['cases']", "%b\") #myFmt = DateFormatter(\"%d %b %y\") ax.xaxis.set_major_formatter(myFmt) #ax.set(xticks=final_df['index'].values) ax.grid(color='#f3f3f3', linestyle=':', linewidth=0.5)##cdcdcd #f3f3f3 #D3D3D3", "# Get ready to pass data to template stats_dict = {'w_cases': w_confirmed, 'w_deaths':", "= 0.5 ax.set_aspect(1.0/ax.get_data_ratio()*ratio) plt.xticks(fontsize=5, rotation=0)#, ha='right') #plt.yticks(fontsize=6) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) #ax.spines['left'].set_edgecolor('gray') ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() ax.tick_params(axis=\"x\",", "Scams'], 'misc': resources['Miscellaneous'], 'commit_info': commit_info_dict, 'state_info': state_info} #,'c_map': map_json rendered_html = template.render(**namespace) with", "ratio = 0.5 ax.set_aspect(1.0/ax.get_data_ratio()*ratio) plt.xticks(fontsize=5, rotation=0)#, ha='right') #plt.yticks(fontsize=6) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) #ax.spines['left'].set_edgecolor('gray') ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left()", "print(\"adding clean datasets\") add_clean_state_data(state_data_path) #clean_state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'clean_daily_statewise_distribution') #map_json = make_chloropleth_json(clean_state_data_path)", "data=plot_df, markers={'deaths': 'X', 'recoveries': 'd', 'cases': 'o'}, ax=ax, **kwargs) # Draw horizontal lines", "add_lat_lon, get_mohfw_stats, extract_clean_df from chloropleth import make_chloropleth_json from clean import add_clean_state_data lg.basicConfig(level=lg.DEBUG, format=(\"[%(asctime)s]", "stats_dict = {'w_cases': w_confirmed, 'w_deaths': w_deaths, 'w_recovered': w_recovered, 'i_cases': in_cases_greater, 'i_deaths': in_deaths_greater ,", "mohfw_handler import mohfw_data_to_df, add_lat_lon, get_mohfw_stats, extract_clean_df from chloropleth import make_chloropleth_json from clean import", "= live_deaths.columns[-1] #get today's stats from mohfw mohfw_stats = get_mohfw_stats(table_df) print(mohfw_stats) #compare dates", "== 'recoveries').max()) ax.axhline(cases_max, ls='dotted', linewidth=0.5) ax.axhline(deaths_max, ls='dotted', linewidth=0.5) ax.axhline(recoveries_max, ls='dotted', linewidth=0.5) #'-', '--',", "ax.text(0.01, cases_max, cases_max, color=\"red\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") ax.text(0.01, deaths_max, deaths_max, color=\"red\", transform=ax.get_yaxis_transform(), ha=\"left\",", "Fake News & Scams'], 'misc': resources['Miscellaneous'], 'commit_info': commit_info_dict, 'state_info': state_info} #,'c_map': map_json rendered_html", ", 'i_recovered': in_recovered_greater} commit_info_dict = {'current_time': datetime.now().strftime(\"%B %d, %Y at %I:%M %p\"), 'commit_sha':", "== 'deaths').max()) recoveries_max = int(plot_df['value'].where(plot_df['category'] == 
'recoveries').max()) ax.axhline(cases_max, ls='dotted', linewidth=0.5) ax.axhline(deaths_max, ls='dotted', linewidth=0.5)", "from datetime import date, datetime import yaml import requests import pandas as pd", "## Using data that is larger live_cases = in_cases_df live_recoveries = in_recoveries_df live_deaths", "if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]): live_recoveries[date_today_str] = mohfw_stats['in_stats']['recovered'] elif date_today == live_recoveries_latest_date: if mohfw_stats['in_stats']['recovered']", "/ Deaths') ax.xaxis.label.set_visible(False) ax.yaxis.label.set_visible(False) ax.legend(labels=['Confirmed Cases', 'Recoveries', 'Deaths'], frameon=False)#loc='upper left' myFmt = DateFormatter(\"%d", "import date2num, DateFormatter import matplotlib.transforms as transforms from jinja2 import Environment, FileSystemLoader from", "deaths_max, color=\"red\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") ax.text(0.01, recoveries_max, recoveries_max, color=\"green\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") #ax.annotate(cases_max,", "mohfw_stats['in_stats']['deaths'] else: in_deaths_greater = jhu_stats['in_stats']['deaths'] if mohfw_stats['in_stats']['recovered'] > jhu_stats['in_stats']['recovered']: in_recovered_greater = mohfw_stats['in_stats']['recovered'] else:", "as transforms from jinja2 import Environment, FileSystemLoader from jhu_handler import melt_data, get_jhu_stats, get_india_stats_from_jhu", "ha=\"right\", va=\"center\") ax.text(0.01, cases_max, cases_max, color=\"red\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") ax.text(0.01, deaths_max, deaths_max, color=\"red\",", "elif date_today == live_cases_latest_date: if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]): live_cases.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['cases'] if date_today", "deaths_max], va='bottom', ha='left', color='red') xt = ax.get_xticks().tolist() last_x_tick = date2num(plot_df['index'].values[-1]) if xt[-1] >", "', '', 'solid', 'dashed', 'dashdot', 'dotted' plt.title('COVID-19 Cases, Recoveries & Deaths Graph') ax.set(xlabel='Time", "#clean_state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'clean_daily_statewise_distribution') #map_json = make_chloropleth_json(clean_state_data_path) # Get ready to", "format lineplot likes final_df = melt_data(in_cases_df, in_deaths_df, in_recoveries_df) final_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) ##", "plot ax = plt.axes() kwargs = {'markeredgewidth': 0.25} sns.lineplot(x='index', y='value', hue='category', hue_order=['cases', 'recoveries',", "#Compare JHU Stats with MoHFW stats for india if mohfw_stats['in_stats']['cases'] > jhu_stats['in_stats']['cases']: in_cases_greater", "int(live_recoveries.iloc[:,-1:].iloc[0]): live_recoveries.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['recovered'] if date_today > live_deaths_latest_date: if mohfw_stats['in_stats']['deaths'] > int(live_deaths.iloc[:,-1:].iloc[0]): live_deaths[date_today_str]", "ha='right') #plt.yticks(fontsize=6) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) #ax.spines['left'].set_edgecolor('gray') ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() ax.tick_params(axis=\"x\", direction='in', length=3, width=0.5) ax.get_yaxis().set_visible(False) ax.spines['bottom'].set_linewidth(0.5)", "print(live_deaths) 
print(live_recoveries) plot_df = melt_data(live_cases, live_deaths, live_recoveries) #plot_df['index'] = plot_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y'))", "make_chloropleth_json(clean_state_data_path) # Get ready to pass data to template stats_dict = {'w_cases': w_confirmed,", "in_recoveries_df live_deaths = in_deaths_df date_today_str = date.today().strftime(\"%-m/%-d/%y\") print(f\"Today's date is = {date_today_str}\") date_today", "#compare dates live_cases_latest_date = datetime.strptime(live_cases_latest_date, \"%m/%d/%y\").date() live_recoveries_latest_date = datetime.strptime(live_recoveries_latest_date, \"%m/%d/%y\").date() live_deaths_latest_date = datetime.strptime(live_deaths_latest_date,", "#jhu_df['index'] = jhu_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y')) jhu_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) # Make plot", "stats from mohfw mohfw_stats = get_mohfw_stats(table_df) print(mohfw_stats) #compare dates live_cases_latest_date = datetime.strptime(live_cases_latest_date, \"%m/%d/%y\").date()", "jhu_stats['in_stats']['cases'] if mohfw_stats['in_stats']['deaths'] > jhu_stats['in_stats']['deaths']: in_deaths_greater = mohfw_stats['in_stats']['deaths'] else: in_deaths_greater = jhu_stats['in_stats']['deaths'] if", "= int(plot_df['value'].where(plot_df['category'] == 'recoveries').max()) ax.axhline(cases_max, ls='dotted', linewidth=0.5) ax.axhline(deaths_max, ls='dotted', linewidth=0.5) ax.axhline(recoveries_max, ls='dotted', linewidth=0.5)", "in live with mohfw value elif date_today == live_cases_latest_date: if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]):", "live with mohfw value elif date_today == live_cases_latest_date: if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]): live_cases.iloc[:,-1:].iloc[0]", "int(live_cases.iloc[:,-1:].iloc[0])) live_cases[date_today_str] = mohfw_stats['in_stats']['cases']# new column in live with mohfw value elif date_today", "encoding='utf-8', index=False) # Make plot ax = plt.axes() kwargs = {'markeredgewidth': 0.25} sns.lineplot(x='index',", "xt[0])/2: xt.pop(-1) #xt = np.append(xt, last_x_tick) xt.append(last_x_tick) #xtl = xt.tolist() ax.set_xticks(xt) ax.axvline(last_x_tick, ls='dotted',", "width=0.5) ax.get_yaxis().set_visible(False) ax.spines['bottom'].set_linewidth(0.5) ax.spines['left'].set_linewidth(0.5) #trans = transforms.blended_transform_factory(ax.get_yticklabels()[0].get_transform(), ax.transData) #ax.text(0, cases_max, color=\"red\", s=cases_max, transform=trans,", "with open('resources.yaml') as fs: resources = yaml.load(fs, yaml.SafeLoader) # add clean datasets state_data_path", "= template_env.get_template(TEMPLATE) sns.set(style=\"ticks\") sns.set_context(\"paper\", rc={\"font.size\":8,\"axes.titlesize\":9,\"axes.labelsize\":10,\"lines.linewidth\": 1.5,'lines.markersize':3})#paper,talk,notebook fig, ax = plt.subplots() covid_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'],", "'w_deaths': w_deaths, 'w_recovered': w_recovered, 'i_cases': in_cases_greater, 'i_deaths': in_deaths_greater , 'i_recovered': in_recovered_greater} commit_info_dict =", "xt.pop(-1) #xt = np.append(xt, last_x_tick) xt.append(last_x_tick) #xtl = xt.tolist() ax.set_xticks(xt) ax.axvline(last_x_tick, ls='dotted', linewidth=0.5)", "datetime.strptime(live_deaths_latest_date, \"%m/%d/%y\").date() print(live_cases_latest_date, live_recoveries_latest_date, live_deaths_latest_date) if 
date_today > live_cases_latest_date: if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]):", "melt_data(in_cases_df, in_deaths_df, in_recoveries_df) final_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) ## Using data that is larger", "- %(message)s\"), datefmt=\"%d-%b-%Y %I:%M:%S %p\")#, filename='log.txt', filemode='a+' template_loader = FileSystemLoader('./templates') template_env = Environment(loader=template_loader)", "ax.axhline(deaths_max, ls='dotted', linewidth=0.5) ax.axhline(recoveries_max, ls='dotted', linewidth=0.5) #'-', '--', '-.', ':', 'None', ' ',", "else: lg.warning(\"Failed to write statewise distribution file. Map will use old file even", "ha=\"left\", va=\"bottom\") ax.text(0.01, deaths_max, deaths_max, color=\"red\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") ax.text(0.01, recoveries_max, recoveries_max, color=\"green\",", "ax.spines['bottom'].set_linewidth(0.5) ax.spines['left'].set_linewidth(0.5) #trans = transforms.blended_transform_factory(ax.get_yticklabels()[0].get_transform(), ax.transData) #ax.text(0, cases_max, color=\"red\", s=cases_max, transform=trans, ha=\"right\", va=\"center\")", "JHU Stats with MoHFW stats for india if mohfw_stats['in_stats']['cases'] > jhu_stats['in_stats']['cases']: in_cases_greater =", "live_deaths = in_deaths_df date_today_str = date.today().strftime(\"%-m/%-d/%y\") print(f\"Today's date is = {date_today_str}\") date_today =", "< (xt[1] - xt[0])/2: xt.pop(-1) #xt = np.append(xt, last_x_tick) xt.append(last_x_tick) #xtl = xt.tolist()", "jhu_df = melt_data(in_cases_df, in_deaths_df, in_recoveries_df) #jhu_df['index'] = jhu_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y')) jhu_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',',", "import requests import pandas as pd import numpy as np import seaborn as", "live_recoveries_latest_date: print(mohfw_stats['in_stats']['recovered'], int(live_recoveries.iloc[:,-1:].iloc[0])) if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]): live_recoveries[date_today_str] = mohfw_stats['in_stats']['recovered'] elif date_today ==", "0.5 ax.set_aspect(1.0/ax.get_data_ratio()*ratio) plt.xticks(fontsize=5, rotation=0)#, ha='right') #plt.yticks(fontsize=6) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) #ax.spines['left'].set_edgecolor('gray') ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() ax.tick_params(axis=\"x\", direction='in',", "sep=',', encoding='utf-8', index=False) # Make plot ax = plt.axes() kwargs = {'markeredgewidth': 0.25}", "ax.xaxis.label.set_visible(False) ax.yaxis.label.set_visible(False) ax.legend(labels=['Confirmed Cases', 'Recoveries', 'Deaths'], frameon=False)#loc='upper left' myFmt = DateFormatter(\"%d %b\") #myFmt", "ax.yaxis.label.set_visible(False) ax.legend(labels=['Confirmed Cases', 'Recoveries', 'Deaths'], frameon=False)#loc='upper left' myFmt = DateFormatter(\"%d %b\") #myFmt =", "else: in_deaths_greater = jhu_stats['in_stats']['deaths'] if mohfw_stats['in_stats']['recovered'] > jhu_stats['in_stats']['recovered']: in_recovered_greater = mohfw_stats['in_stats']['recovered'] else: in_recovered_greater", "'recoveries': 'd', 'cases': 'o'}, ax=ax, **kwargs) # Draw horizontal lines at max values", "DateFormatter(\"%d %b\") #myFmt = DateFormatter(\"%d %b %y\") ax.xaxis.set_major_formatter(myFmt) #ax.set(xticks=final_df['index'].values) ax.grid(color='#f3f3f3', 
linestyle=':', linewidth=0.5)##cdcdcd #f3f3f3", "import make_chloropleth_json from clean import add_clean_state_data lg.basicConfig(level=lg.DEBUG, format=(\"[%(asctime)s] [%(levelname)8s] %(filename)s - %(message)s\"), datefmt=\"%d-%b-%Y", "deaths_max, color=\"red\", s=deaths_max, transform=trans, ha=\"right\", va=\"center\") ax.text(0.01, cases_max, cases_max, color=\"red\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\")", "ax.text(0.01, recoveries_max, recoveries_max, color=\"green\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") #ax.annotate(cases_max, [ax.get_xticks()[-1], cases_max], va='bottom', ha='right', color='red')", "datetime.strptime(live_cases_latest_date, \"%m/%d/%y\").date() live_recoveries_latest_date = datetime.strptime(live_recoveries_latest_date, \"%m/%d/%y\").date() live_deaths_latest_date = datetime.strptime(live_deaths_latest_date, \"%m/%d/%y\").date() print(live_cases_latest_date, live_recoveries_latest_date, live_deaths_latest_date)", "recoveries_max, recoveries_max, color=\"green\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") #ax.annotate(cases_max, [ax.get_xticks()[-1], cases_max], va='bottom', ha='right', color='red') #ax.annotate(deaths_max,", "data to a format lineplot likes final_df = melt_data(in_cases_df, in_deaths_df, in_recoveries_df) final_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',',", "date_today > live_cases_latest_date: if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]): print(mohfw_stats['in_stats']['cases'], int(live_cases.iloc[:,-1:].iloc[0])) live_cases[date_today_str] = mohfw_stats['in_stats']['cases']# new", "sep=',', encoding='utf-8', index=False) jhu_df = melt_data(in_cases_df, in_deaths_df, in_recoveries_df) #jhu_df['index'] = jhu_df['index'].apply(lambda x: datetime.strptime(x,", "cases_path = os.path.join(covid_data_path, 'time_series_covid19_confirmed_global.csv') recoveries_path = os.path.join(covid_data_path, 'time_series_covid19_recovered_global.csv') deaths_path = os.path.join(covid_data_path, 'time_series_covid19_deaths_global.csv') Path(os.path.join(os.environ['GITHUB_WORKSPACE'],", "in_recoveries_df, in_deaths_df = get_india_stats_from_jhu(cases_path, recoveries_path, deaths_path) # Transforming data to a format lineplot", "sns.lineplot(x='index', y='value', hue='category', hue_order=['cases', 'recoveries', 'deaths'], style='category', palette={'cases': 'Red', 'recoveries': 'Green', 'deaths': 'Gray'},", "datetime import date, datetime import yaml import requests import pandas as pd import", "import numpy as np import seaborn as sns import matplotlib.pyplot as plt from", "import Environment, FileSystemLoader from jhu_handler import melt_data, get_jhu_stats, get_india_stats_from_jhu from mohfw_handler import mohfw_data_to_df,", "direction='in', length=3, width=0.5) ax.get_yaxis().set_visible(False) ax.spines['bottom'].set_linewidth(0.5) ax.spines['left'].set_linewidth(0.5) #trans = transforms.blended_transform_factory(ax.get_yticklabels()[0].get_transform(), ax.transData) #ax.text(0, cases_max, color=\"red\",", "TEMPLATE = \"template.html\" template = template_env.get_template(TEMPLATE) sns.set(style=\"ticks\") sns.set_context(\"paper\", rc={\"font.size\":8,\"axes.titlesize\":9,\"axes.labelsize\":10,\"lines.linewidth\": 1.5,'lines.markersize':3})#paper,talk,notebook fig, ax =", "= in_recoveries_df live_deaths = in_deaths_df date_today_str = date.today().strftime(\"%-m/%-d/%y\") print(f\"Today's date is = 
{date_today_str}\")", "if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]): live_recoveries.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['recovered'] if date_today > live_deaths_latest_date: if mohfw_stats['in_stats']['deaths']", "live_recoveries.columns[-1] live_deaths_latest_date = live_deaths.columns[-1] #get today's stats from mohfw mohfw_stats = get_mohfw_stats(table_df) print(mohfw_stats)", "' ', '', 'solid', 'dashed', 'dashdot', 'dotted' plt.title('COVID-19 Cases, Recoveries & Deaths Graph')", "'datasets', 'clean_daily_statewise_distribution') #map_json = make_chloropleth_json(clean_state_data_path) # Get ready to pass data to template", "News & Scams'], 'misc': resources['Miscellaneous'], 'commit_info': commit_info_dict, 'state_info': state_info} #,'c_map': map_json rendered_html =", "= plt.subplots() covid_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_time_series') cases_path = os.path.join(covid_data_path, 'time_series_covid19_confirmed_global.csv') recoveries_path", "va=\"bottom\") ax.text(0.01, recoveries_max, recoveries_max, color=\"green\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") #ax.annotate(cases_max, [ax.get_xticks()[-1], cases_max], va='bottom', ha='right',", "ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() ax.tick_params(axis=\"x\", direction='in', length=3, width=0.5) ax.get_yaxis().set_visible(False) ax.spines['bottom'].set_linewidth(0.5) ax.spines['left'].set_linewidth(0.5) #trans = transforms.blended_transform_factory(ax.get_yticklabels()[0].get_transform(), ax.transData)", "last_x_tick) < (xt[1] - xt[0])/2: xt.pop(-1) #xt = np.append(xt, last_x_tick) xt.append(last_x_tick) #xtl =", "jhu_handler import melt_data, get_jhu_stats, get_india_stats_from_jhu from mohfw_handler import mohfw_data_to_df, add_lat_lon, get_mohfw_stats, extract_clean_df from", "template_env = Environment(loader=template_loader) TEMPLATE = \"template.html\" template = template_env.get_template(TEMPLATE) sns.set(style=\"ticks\") sns.set_context(\"paper\", rc={\"font.size\":8,\"axes.titlesize\":9,\"axes.labelsize\":10,\"lines.linewidth\": 1.5,'lines.markersize':3})#paper,talk,notebook", "= datetime.strptime(live_deaths_latest_date, \"%m/%d/%y\").date() print(live_cases_latest_date, live_recoveries_latest_date, live_deaths_latest_date) if date_today > live_cases_latest_date: if mohfw_stats['in_stats']['cases'] >", "DateFormatter(\"%d %b %y\") ax.xaxis.set_major_formatter(myFmt) #ax.set(xticks=final_df['index'].values) ax.grid(color='#f3f3f3', linestyle=':', linewidth=0.5)##cdcdcd #f3f3f3 #D3D3D3 ratio = 0.5", "'fakes': resources['Fads, Fake News & Scams'], 'misc': resources['Miscellaneous'], 'commit_info': commit_info_dict, 'state_info': state_info} #,'c_map':", "date_today == live_cases_latest_date: if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]): live_cases.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['cases'] if date_today >", "= yaml.load(fs, yaml.SafeLoader) # add clean datasets state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets') print(\"adding", "ax.axhline(cases_max, ls='dotted', linewidth=0.5) ax.axhline(deaths_max, ls='dotted', linewidth=0.5) ax.axhline(recoveries_max, ls='dotted', linewidth=0.5) #'-', '--', '-.', ':',", "old file even though new data is available\") in_cases_df, in_recoveries_df, in_deaths_df = get_india_stats_from_jhu(cases_path,", "**kwargs) # Draw horizontal lines at max values 
cases_max = int(plot_df['value'].where(plot_df['category'] == 'cases').max())", "'', 'solid', 'dashed', 'dashdot', 'dotted' plt.title('COVID-19 Cases, Recoveries & Deaths Graph') ax.set(xlabel='Time ->',", "> int(live_cases.iloc[:,-1:].iloc[0]): live_cases.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['cases'] if date_today > live_recoveries_latest_date: print(mohfw_stats['in_stats']['recovered'], int(live_recoveries.iloc[:,-1:].iloc[0])) if mohfw_stats['in_stats']['recovered']", "xt.tolist() ax.set_xticks(xt) ax.axvline(last_x_tick, ls='dotted', linewidth=0.5) plt.savefig(\"graph.svg\", format='svg', dpi=1200, bbox_inches='tight') #plt.show() # Make index.html", "= mohfw_stats['in_stats']['deaths'] else: in_deaths_greater = jhu_stats['in_stats']['deaths'] if mohfw_stats['in_stats']['recovered'] > jhu_stats['in_stats']['recovered']: in_recovered_greater = mohfw_stats['in_stats']['recovered']", "to template stats_dict = {'w_cases': w_confirmed, 'w_deaths': w_deaths, 'w_recovered': w_recovered, 'i_cases': in_cases_greater, 'i_deaths':", "= datetime.strptime(live_cases_latest_date, \"%m/%d/%y\").date() live_recoveries_latest_date = datetime.strptime(live_recoveries_latest_date, \"%m/%d/%y\").date() live_deaths_latest_date = datetime.strptime(live_deaths_latest_date, \"%m/%d/%y\").date() print(live_cases_latest_date, live_recoveries_latest_date,", "plt.axes() kwargs = {'markeredgewidth': 0.25} sns.lineplot(x='index', y='value', hue='category', hue_order=['cases', 'recoveries', 'deaths'], style='category', palette={'cases':", "#ax.text(0, deaths_max, color=\"red\", s=deaths_max, transform=trans, ha=\"right\", va=\"center\") ax.text(0.01, cases_max, cases_max, color=\"red\", transform=ax.get_yaxis_transform(), ha=\"left\",", "dashes=False, data=plot_df, markers={'deaths': 'X', 'recoveries': 'd', 'cases': 'o'}, ax=ax, **kwargs) # Draw horizontal", "if abs(xt[-1] - last_x_tick) < (xt[1] - xt[0])/2: xt.pop(-1) #xt = np.append(xt, last_x_tick)", "mohfw mohfw_stats = get_mohfw_stats(table_df) print(mohfw_stats) #compare dates live_cases_latest_date = datetime.strptime(live_cases_latest_date, \"%m/%d/%y\").date() live_recoveries_latest_date =", "is = {date_today_str}\") date_today = date.today() print(date_today) #check date in index live_cases_latest_date =", "max values cases_max = int(plot_df['value'].where(plot_df['category'] == 'cases').max()) deaths_max = int(plot_df['value'].where(plot_df['category'] == 'deaths').max()) recoveries_max", "w_confirmed = jhu_stats['w_stats']['cases'] w_deaths = jhu_stats['w_stats']['deaths'] w_recovered = jhu_stats['w_stats']['recovered'] ## read resource yaml", "= \"template.html\" template = template_env.get_template(TEMPLATE) sns.set(style=\"ticks\") sns.set_context(\"paper\", rc={\"font.size\":8,\"axes.titlesize\":9,\"axes.labelsize\":10,\"lines.linewidth\": 1.5,'lines.markersize':3})#paper,talk,notebook fig, ax = plt.subplots()", "#print(\"Table DF\") #print(table_df) if not table_df.empty: table_df.to_csv(f'./datasets/statewise_distribution/{str(date.today())}.csv', sep=',', encoding='utf-8', index=False) else: lg.warning(\"Failed to", "jhu_stats['in_stats']['cases']: in_cases_greater = mohfw_stats['in_stats']['cases'] else: in_cases_greater = jhu_stats['in_stats']['cases'] if mohfw_stats['in_stats']['deaths'] > jhu_stats['in_stats']['deaths']: in_deaths_greater", "in_cases_greater = mohfw_stats['in_stats']['cases'] else: in_cases_greater = jhu_stats['in_stats']['cases'] if mohfw_stats['in_stats']['deaths'] > jhu_stats['in_stats']['deaths']: 
in_deaths_greater =", "is available\") in_cases_df, in_recoveries_df, in_deaths_df = get_india_stats_from_jhu(cases_path, recoveries_path, deaths_path) # Transforming data to", "to a format lineplot likes final_df = melt_data(in_cases_df, in_deaths_df, in_recoveries_df) final_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8',", "live_recoveries_latest_date, live_deaths_latest_date) if date_today > live_cases_latest_date: if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]): print(mohfw_stats['in_stats']['cases'], int(live_cases.iloc[:,-1:].iloc[0])) live_cases[date_today_str]", "commit_info_dict = {'current_time': datetime.now().strftime(\"%B %d, %Y at %I:%M %p\"), 'commit_sha': os.environ['GITHUB_SHA']} state_info =", "will use old file even though new data is available\") in_cases_df, in_recoveries_df, in_deaths_df", "%b %y\") ax.xaxis.set_major_formatter(myFmt) #ax.set(xticks=final_df['index'].values) ax.grid(color='#f3f3f3', linestyle=':', linewidth=0.5)##cdcdcd #f3f3f3 #D3D3D3 ratio = 0.5 ax.set_aspect(1.0/ax.get_data_ratio()*ratio)", "table_df = extract_clean_df(mohfw_data_df) table_df = add_lat_lon(table_df) #print(\"Table DF\") #print(table_df) if not table_df.empty: table_df.to_csv(f'./datasets/statewise_distribution/{str(date.today())}.csv',", "use old file even though new data is available\") in_cases_df, in_recoveries_df, in_deaths_df =", "mohfw_stats['in_stats']['cases']# new column in live with mohfw value elif date_today == live_cases_latest_date: if", "color=\"red\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") ax.text(0.01, recoveries_max, recoveries_max, color=\"green\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") #ax.annotate(cases_max, [ax.get_xticks()[-1],", "in_cases_greater = jhu_stats['in_stats']['cases'] if mohfw_stats['in_stats']['deaths'] > jhu_stats['in_stats']['deaths']: in_deaths_greater = mohfw_stats['in_stats']['deaths'] else: in_deaths_greater =", "Environment, FileSystemLoader from jhu_handler import melt_data, get_jhu_stats, get_india_stats_from_jhu from mohfw_handler import mohfw_data_to_df, add_lat_lon,", "transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") ax.text(0.01, deaths_max, deaths_max, color=\"red\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") ax.text(0.01, recoveries_max, recoveries_max,", "resource yaml with open('resources.yaml') as fs: resources = yaml.load(fs, yaml.SafeLoader) # add clean", "sns.set_context(\"paper\", rc={\"font.size\":8,\"axes.titlesize\":9,\"axes.labelsize\":10,\"lines.linewidth\": 1.5,'lines.markersize':3})#paper,talk,notebook fig, ax = plt.subplots() covid_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_time_series')", "ax.set_aspect(1.0/ax.get_data_ratio()*ratio) plt.xticks(fontsize=5, rotation=0)#, ha='right') #plt.yticks(fontsize=6) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) #ax.spines['left'].set_edgecolor('gray') ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() ax.tick_params(axis=\"x\", direction='in', length=3,", "= os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets') print(\"adding clean datasets\") add_clean_state_data(state_data_path) #clean_state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets',", "#ax.annotate(cases_max, [ax.get_xticks()[-1], cases_max], va='bottom', ha='right', color='red') 
#ax.annotate(deaths_max, [ax.get_xticks()[-1], deaths_max], va='bottom', ha='left', color='red') xt", "= mohfw_stats['in_stats']['recovered'] else: in_recovered_greater = jhu_stats['in_stats']['recovered'] #world stats w_confirmed = jhu_stats['w_stats']['cases'] w_deaths =", "from jhu_handler import melt_data, get_jhu_stats, get_india_stats_from_jhu from mohfw_handler import mohfw_data_to_df, add_lat_lon, get_mohfw_stats, extract_clean_df", "os import glob import json import logging as lg from pathlib import Path", "Deaths') ax.xaxis.label.set_visible(False) ax.yaxis.label.set_visible(False) ax.legend(labels=['Confirmed Cases', 'Recoveries', 'Deaths'], frameon=False)#loc='upper left' myFmt = DateFormatter(\"%d %b\")", "'--', '-.', ':', 'None', ' ', '', 'solid', 'dashed', 'dashdot', 'dotted' plt.title('COVID-19 Cases,", "= mohfw_stats['in_stats']['cases'] if date_today > live_recoveries_latest_date: print(mohfw_stats['in_stats']['recovered'], int(live_recoveries.iloc[:,-1:].iloc[0])) if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]): live_recoveries[date_today_str]", "myFmt = DateFormatter(\"%d %b\") #myFmt = DateFormatter(\"%d %b %y\") ax.xaxis.set_major_formatter(myFmt) #ax.set(xticks=final_df['index'].values) ax.grid(color='#f3f3f3', linestyle=':',", "plt from matplotlib.dates import date2num, DateFormatter import matplotlib.transforms as transforms from jinja2 import", "0.25} sns.lineplot(x='index', y='value', hue='category', hue_order=['cases', 'recoveries', 'deaths'], style='category', palette={'cases': 'Red', 'recoveries': 'Green', 'deaths':", "at max values cases_max = int(plot_df['value'].where(plot_df['category'] == 'cases').max()) deaths_max = int(plot_df['value'].where(plot_df['category'] == 'deaths').max())", "in_cases_greater, 'i_deaths': in_deaths_greater , 'i_recovered': in_recovered_greater} commit_info_dict = {'current_time': datetime.now().strftime(\"%B %d, %Y at", "import melt_data, get_jhu_stats, get_india_stats_from_jhu from mohfw_handler import mohfw_data_to_df, add_lat_lon, get_mohfw_stats, extract_clean_df from chloropleth", "in_cases_df, in_recoveries_df, in_deaths_df = get_india_stats_from_jhu(cases_path, recoveries_path, deaths_path) # Transforming data to a format", "json import logging as lg from pathlib import Path from datetime import date,", "requests import pandas as pd import numpy as np import seaborn as sns", "ax=ax, **kwargs) # Draw horizontal lines at max values cases_max = int(plot_df['value'].where(plot_df['category'] ==", "& Deaths Graph') ax.set(xlabel='Time ->', ylabel='Cases / Deaths') ax.xaxis.label.set_visible(False) ax.yaxis.label.set_visible(False) ax.legend(labels=['Confirmed Cases', 'Recoveries',", "= get_mohfw_stats(table_df) print(mohfw_stats) #compare dates live_cases_latest_date = datetime.strptime(live_cases_latest_date, \"%m/%d/%y\").date() live_recoveries_latest_date = datetime.strptime(live_recoveries_latest_date, \"%m/%d/%y\").date()", "'time_series_covid19_deaths_global.csv') Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'timeseries_records')).mkdir(parents=True, exist_ok=True) Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'statewise_distribution')).mkdir(parents=True, exist_ok=True) mohfw_data_df =", "encoding='utf-8', index=False) else: lg.warning(\"Failed to write statewise distribution file. 
Map will use old", "in_deaths_df date_today_str = date.today().strftime(\"%-m/%-d/%y\") print(f\"Today's date is = {date_today_str}\") date_today = date.today() print(date_today)", "resources['Virus & the Disease'], 'fakes': resources['Fads, Fake News & Scams'], 'misc': resources['Miscellaneous'], 'commit_info':", "mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]): live_recoveries.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['recovered'] if date_today > live_deaths_latest_date: if mohfw_stats['in_stats']['deaths'] >", "left' myFmt = DateFormatter(\"%d %b\") #myFmt = DateFormatter(\"%d %b %y\") ax.xaxis.set_major_formatter(myFmt) #ax.set(xticks=final_df['index'].values) ax.grid(color='#f3f3f3',", "'deaths': 'Gray'}, dashes=False, data=plot_df, markers={'deaths': 'X', 'recoveries': 'd', 'cases': 'o'}, ax=ax, **kwargs) #", "= extract_clean_df(mohfw_data_df) table_df = add_lat_lon(table_df) #print(\"Table DF\") #print(table_df) if not table_df.empty: table_df.to_csv(f'./datasets/statewise_distribution/{str(date.today())}.csv', sep=',',", "exist_ok=True) Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'statewise_distribution')).mkdir(parents=True, exist_ok=True) mohfw_data_df = mohfw_data_to_df() table_df = extract_clean_df(mohfw_data_df) table_df", "hue='category', hue_order=['cases', 'recoveries', 'deaths'], style='category', palette={'cases': 'Red', 'recoveries': 'Green', 'deaths': 'Gray'}, dashes=False, data=plot_df,", "PREVENTION'], 'about': resources['Virus & the Disease'], 'fakes': resources['Fads, Fake News & Scams'], 'misc':", "= plot_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y')) plot_df.to_csv(f'./datasets/timeseries_records/live_cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) jhu_df = melt_data(in_cases_df, in_deaths_df,", "get_jhu_stats, get_india_stats_from_jhu from mohfw_handler import mohfw_data_to_df, add_lat_lon, get_mohfw_stats, extract_clean_df from chloropleth import make_chloropleth_json", "live_recoveries_latest_date = datetime.strptime(live_recoveries_latest_date, \"%m/%d/%y\").date() live_deaths_latest_date = datetime.strptime(live_deaths_latest_date, \"%m/%d/%y\").date() print(live_cases_latest_date, live_recoveries_latest_date, live_deaths_latest_date) if date_today", "fs: resources = yaml.load(fs, yaml.SafeLoader) # add clean datasets state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in',", "mohfw_stats['in_stats']['cases'] > jhu_stats['in_stats']['cases']: in_cases_greater = mohfw_stats['in_stats']['cases'] else: in_cases_greater = jhu_stats['in_stats']['cases'] if mohfw_stats['in_stats']['deaths'] >", "in_deaths_df, in_recoveries_df) #jhu_df['index'] = jhu_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y')) jhu_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) #", "in_deaths_greater = mohfw_stats['in_stats']['deaths'] else: in_deaths_greater = jhu_stats['in_stats']['deaths'] if mohfw_stats['in_stats']['recovered'] > jhu_stats['in_stats']['recovered']: in_recovered_greater =", "'i_deaths': in_deaths_greater , 'i_recovered': in_recovered_greater} commit_info_dict = {'current_time': datetime.now().strftime(\"%B %d, %Y at %I:%M", "Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'timeseries_records')).mkdir(parents=True, exist_ok=True) Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 
Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'timeseries_records')).mkdir(parents=True, exist_ok=True)
Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'statewise_distribution')).mkdir(parents=True, exist_ok=True)

mohfw_data_df = mohfw_data_to_df()
table_df = extract_clean_df(mohfw_data_df)
table_df = add_lat_lon(table_df)
#print("Table DF")
#print(table_df)
if not table_df.empty:
    table_df.to_csv(f'./datasets/statewise_distribution/{str(date.today())}.csv', sep=',', encoding='utf-8', index=False)
else:
    lg.warning("Failed to write statewise distribution file. Map will use old file even though new data is available")
in_cases_df, in_recoveries_df, in_deaths_df = get_india_stats_from_jhu(cases_path, recoveries_path, deaths_path)
# Transforming data to a format lineplot likes
final_df = melt_data(in_cases_df, in_deaths_df, in_recoveries_df)
final_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False)
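# ---------------------------------------------------------------------------
# Illustration only; never called. `melt_data` lives in jhu_handler and its
# real implementation is not shown in this file. Judging from how its output
# is consumed (a long-form frame with 'index', 'value' and 'category' columns
# fed to sns.lineplot further down), it plausibly reshapes the three wide,
# date-columned frames roughly like this. Every detail of this helper is an
# assumption, not the project's actual code.
def _melt_data_sketch(cases_df, deaths_df, recoveries_df):
    frames = []
    for category, df in (('cases', cases_df), ('deaths', deaths_df), ('recoveries', recoveries_df)):
        long_df = df.melt(var_name='index', value_name='value')  # wide dates -> long rows
        long_df['category'] = category
        frames.append(long_df)
    out = pd.concat(frames, ignore_index=True)
    out['index'] = pd.to_datetime(out['index'])  # date2num below needs real datetimes
    return out
# ---------------------------------------------------------------------------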
## Use whichever source (JHU vs MoHFW) reports the larger figures
live_cases = in_cases_df
live_recoveries = in_recoveries_df
live_deaths = in_deaths_df
date_today_str = date.today().strftime("%-m/%-d/%y")
print(f"Today's date is = {date_today_str}")
date_today = date.today()
print(date_today)
# check the latest date in each frame's columns
live_cases_latest_date = live_cases.columns[-1]
live_recoveries_latest_date = live_recoveries.columns[-1]
live_deaths_latest_date = live_deaths.columns[-1]
# get today's stats from MoHFW
mohfw_stats = get_mohfw_stats(table_df)
print(mohfw_stats)
# compare dates
live_cases_latest_date = datetime.strptime(live_cases_latest_date, "%m/%d/%y").date()
live_recoveries_latest_date = datetime.strptime(live_recoveries_latest_date, "%m/%d/%y").date()
live_deaths_latest_date = datetime.strptime(live_deaths_latest_date, "%m/%d/%y").date()
print(live_cases_latest_date, live_recoveries_latest_date, live_deaths_latest_date)

# Single-row frames: live_*.iloc[0, -1] is the most recent figure. (The
# original chained form live_*.iloc[:, -1:].iloc[0] reads the same cell, but
# its assignment variant writes to a copy, so positional indexing is used.)
if date_today > live_cases_latest_date:
    if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[0, -1]):
        print(mohfw_stats['in_stats']['cases'], int(live_cases.iloc[0, -1]))
        live_cases[date_today_str] = mohfw_stats['in_stats']['cases']  # new column in live with MoHFW value
elif date_today == live_cases_latest_date:
    if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[0, -1]):
        live_cases.iloc[0, -1] = mohfw_stats['in_stats']['cases']

if date_today > live_recoveries_latest_date:
    print(mohfw_stats['in_stats']['recovered'], int(live_recoveries.iloc[0, -1]))
    if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[0, -1]):
        live_recoveries[date_today_str] = mohfw_stats['in_stats']['recovered']
elif date_today == live_recoveries_latest_date:
    if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[0, -1]):
        live_recoveries.iloc[0, -1] = mohfw_stats['in_stats']['recovered']

if date_today > live_deaths_latest_date:
    if mohfw_stats['in_stats']['deaths'] > int(live_deaths.iloc[0, -1]):
        live_deaths[date_today_str] = mohfw_stats['in_stats']['deaths']
elif date_today == live_deaths_latest_date:
    if mohfw_stats['in_stats']['deaths'] > int(live_deaths.iloc[0, -1]):
        live_deaths.iloc[0, -1] = mohfw_stats['in_stats']['deaths']

print(live_cases)
print(live_deaths)
print(live_recoveries)
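# ---------------------------------------------------------------------------
# Tiny self-contained illustration (never called) of the update rule applied
# above, using a made-up one-row frame shaped like live_cases: one row, one
# column per date string. It only demonstrates the two branches: append a new
# column for a newer day, overwrite the last column for the same day.
def _demo_update_rule():
    toy = pd.DataFrame({'3/20/20': [244], '3/21/20': [330]})
    today, latest = date(2020, 3, 22), date(2020, 3, 21)
    new_value = 396  # hypothetical fresher figure
    if today > latest:
        if new_value > int(toy.iloc[0, -1]):
            toy['3/22/20'] = new_value   # newer day: add a column
    elif today == latest:
        if new_value > int(toy.iloc[0, -1]):
            toy.iloc[0, -1] = new_value  # same day: overwrite in place
    return toy
# ---------------------------------------------------------------------------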
plot_df = melt_data(live_cases, live_deaths, live_recoveries)
#plot_df['index'] = plot_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y'))
plot_df.to_csv(f'./datasets/timeseries_records/live_cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False)
jhu_df = melt_data(in_cases_df, in_deaths_df, in_recoveries_df)
#jhu_df['index'] = jhu_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y'))
jhu_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False)

# Make plot
ax = plt.axes()
kwargs = {'markeredgewidth': 0.25}
sns.lineplot(x='index', y='value', hue='category', hue_order=['cases', 'recoveries', 'deaths'],
             style='category', palette={'cases': 'Red', 'recoveries': 'Green', 'deaths': 'Gray'},
             dashes=False, data=plot_df, markers={'deaths': 'X', 'recoveries': 'd', 'cases': 'o'},
             ax=ax, **kwargs)

# Draw horizontal lines at max values
cases_max = int(plot_df['value'].where(plot_df['category'] == 'cases').max())
deaths_max = int(plot_df['value'].where(plot_df['category'] == 'deaths').max())
recoveries_max = int(plot_df['value'].where(plot_df['category'] == 'recoveries').max())
ax.axhline(cases_max, ls='dotted', linewidth=0.5)
ax.axhline(deaths_max, ls='dotted', linewidth=0.5)
ax.axhline(recoveries_max, ls='dotted', linewidth=0.5)
# line styles: '-', '--', '-.', ':', 'None', ' ', '', 'solid', 'dashed', 'dashdot', 'dotted'
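# Note: the three .where(...).max() calls above are equivalent to a single
# pass of plot_df.groupby('category')['value'].max(); the script keeps the
# explicit per-category form.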
plt.title('COVID-19 Cases, Recoveries & Deaths Graph')
ax.set(xlabel='Time ->', ylabel='Cases / Deaths')
ax.xaxis.label.set_visible(False)
ax.yaxis.label.set_visible(False)
ax.legend(labels=['Confirmed Cases', 'Recoveries', 'Deaths'], frameon=False)  # loc='upper left'
myFmt = DateFormatter("%d %b")
#myFmt = DateFormatter("%d %b %y")
ax.xaxis.set_major_formatter(myFmt)
#ax.set(xticks=final_df['index'].values)
ax.grid(color='#f3f3f3', linestyle=':', linewidth=0.5)  # alternatives: #cdcdcd #D3D3D3
ratio = 0.5
ax.set_aspect(1.0 / ax.get_data_ratio() * ratio)
plt.xticks(fontsize=5, rotation=0)  # , ha='right'
#plt.yticks(fontsize=6)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['left'].set_edgecolor('gray')
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.tick_params(axis="x", direction='in', length=3, width=0.5)
ax.get_yaxis().set_visible(False)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['left'].set_linewidth(0.5)
#trans = transforms.blended_transform_factory(ax.get_yticklabels()[0].get_transform(), ax.transData)
#ax.text(0, cases_max, color="red", s=cases_max, transform=trans, ha="right", va="center")
#ax.text(0, deaths_max, color="red", s=deaths_max, transform=trans, ha="right", va="center")
ax.text(0.01, cases_max, cases_max, color="red", transform=ax.get_yaxis_transform(), ha="left", va="bottom")
ax.text(0.01, deaths_max, deaths_max, color="red", transform=ax.get_yaxis_transform(), ha="left", va="bottom")
ax.text(0.01, recoveries_max, recoveries_max, color="green", transform=ax.get_yaxis_transform(), ha="left", va="bottom")
#ax.annotate(cases_max, [ax.get_xticks()[-1], cases_max], va='bottom', ha='right', color='red')
#ax.annotate(deaths_max, [ax.get_xticks()[-1], deaths_max], va='bottom', ha='left', color='red')
# Make the last data point its own x tick: drop the final auto tick if it
# overshoots the data, or if it would crowd the appended tick.
xt = ax.get_xticks().tolist()
last_x_tick = date2num(plot_df['index'].values[-1])
if xt[-1] > last_x_tick:
    xt.pop(-1)
else:
    if abs(xt[-1] - last_x_tick) < (xt[1] - xt[0]) / 2:
        xt.pop(-1)
#xt = np.append(xt, last_x_tick)
xt.append(last_x_tick)
#xtl = xt.tolist()
ax.set_xticks(xt)
ax.axvline(last_x_tick, ls='dotted', linewidth=0.5)
plt.savefig("graph.svg", format='svg', dpi=1200, bbox_inches='tight')
#plt.show()
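# Note on the tick surgery above: date2num maps datetimes onto Matplotlib's
# float date axis (days since Matplotlib's date epoch), so the tick list can
# be edited with plain float comparisons, and xt[1] - xt[0] is the tick
# spacing in days.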
# Make index.html
# acquire latest statistics
covid_daily_reports_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_daily_reports')
jhu_stats = get_jhu_stats(covid_daily_reports_path)
# Compare JHU stats with MoHFW stats for India; keep whichever is larger
if mohfw_stats['in_stats']['cases'] > jhu_stats['in_stats']['cases']:
    in_cases_greater = mohfw_stats['in_stats']['cases']
else:
    in_cases_greater = jhu_stats['in_stats']['cases']
if mohfw_stats['in_stats']['deaths'] > jhu_stats['in_stats']['deaths']:
    in_deaths_greater = mohfw_stats['in_stats']['deaths']
else:
    in_deaths_greater = jhu_stats['in_stats']['deaths']
if mohfw_stats['in_stats']['recovered'] > jhu_stats['in_stats']['recovered']:
    in_recovered_greater = mohfw_stats['in_stats']['recovered']
else:
    in_recovered_greater = jhu_stats['in_stats']['recovered']
# world stats
w_confirmed = jhu_stats['w_stats']['cases']
w_deaths = jhu_stats['w_stats']['deaths']
w_recovered = jhu_stats['w_stats']['recovered']

## read resource yaml
with open('resources.yaml') as fs:
    resources = yaml.load(fs, yaml.SafeLoader)

# add clean datasets
state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets')
print("adding clean datasets")
add_clean_state_data(state_data_path)
#clean_state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'clean_daily_statewise_distribution')
#map_json = make_chloropleth_json(clean_state_data_path)
plot_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y')) plot_df.to_csv(f'./datasets/timeseries_records/live_cases_deaths_recoveries_timeseries.csv', sep=',',", "live_deaths.columns[-1] #get today's stats from mohfw mohfw_stats = get_mohfw_stats(table_df) print(mohfw_stats) #compare dates live_cases_latest_date", "linewidth=0.5)##cdcdcd #f3f3f3 #D3D3D3 ratio = 0.5 ax.set_aspect(1.0/ax.get_data_ratio()*ratio) plt.xticks(fontsize=5, rotation=0)#, ha='right') #plt.yticks(fontsize=6) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False)", "ax.spines['left'].set_linewidth(0.5) #trans = transforms.blended_transform_factory(ax.get_yticklabels()[0].get_transform(), ax.transData) #ax.text(0, cases_max, color=\"red\", s=cases_max, transform=trans, ha=\"right\", va=\"center\") #ax.text(0,", "= jhu_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y')) jhu_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) # Make plot ax", "# Make plot ax = plt.axes() kwargs = {'markeredgewidth': 0.25} sns.lineplot(x='index', y='value', hue='category',", "data that is larger live_cases = in_cases_df live_recoveries = in_recoveries_df live_deaths = in_deaths_df", "add clean datasets state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets') print(\"adding clean datasets\") add_clean_state_data(state_data_path) #clean_state_data_path", "from mohfw mohfw_stats = get_mohfw_stats(table_df) print(mohfw_stats) #compare dates live_cases_latest_date = datetime.strptime(live_cases_latest_date, \"%m/%d/%y\").date() live_recoveries_latest_date", "pathlib import Path from datetime import date, datetime import yaml import requests import", "available\") in_cases_df, in_recoveries_df, in_deaths_df = get_india_stats_from_jhu(cases_path, recoveries_path, deaths_path) # Transforming data to a", "w_recovered, 'i_cases': in_cases_greater, 'i_deaths': in_deaths_greater , 'i_recovered': in_recovered_greater} commit_info_dict = {'current_time': datetime.now().strftime(\"%B %d,", "print(mohfw_stats) #compare dates live_cases_latest_date = datetime.strptime(live_cases_latest_date, \"%m/%d/%y\").date() live_recoveries_latest_date = datetime.strptime(live_recoveries_latest_date, \"%m/%d/%y\").date() live_deaths_latest_date =", "'recoveries': 'Green', 'deaths': 'Gray'}, dashes=False, data=plot_df, markers={'deaths': 'X', 'recoveries': 'd', 'cases': 'o'}, ax=ax,", "final_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) ## Using data that is larger live_cases = in_cases_df", "int(live_recoveries.iloc[:,-1:].iloc[0])) if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]): live_recoveries[date_today_str] = mohfw_stats['in_stats']['recovered'] elif date_today == live_recoveries_latest_date: if", "linestyle=':', linewidth=0.5)##cdcdcd #f3f3f3 #D3D3D3 ratio = 0.5 ax.set_aspect(1.0/ax.get_data_ratio()*ratio) plt.xticks(fontsize=5, rotation=0)#, ha='right') #plt.yticks(fontsize=6) ax.spines['top'].set_visible(False)", "color=\"red\", s=deaths_max, transform=trans, ha=\"right\", va=\"center\") ax.text(0.01, cases_max, cases_max, color=\"red\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") ax.text(0.01,", "'csse_covid_19_daily_reports') jhu_stats = get_jhu_stats(covid_daily_reports_path) #Compare JHU Stats with MoHFW stats for india if", "extract_clean_df(mohfw_data_df) table_df = 
add_lat_lon(table_df) #print(\"Table DF\") #print(table_df) if not table_df.empty: table_df.to_csv(f'./datasets/statewise_distribution/{str(date.today())}.csv', sep=',', encoding='utf-8',", "live_recoveries = in_recoveries_df live_deaths = in_deaths_df date_today_str = date.today().strftime(\"%-m/%-d/%y\") print(f\"Today's date is =", "date is = {date_today_str}\") date_today = date.today() print(date_today) #check date in index live_cases_latest_date", "live_recoveries[date_today_str] = mohfw_stats['in_stats']['recovered'] elif date_today == live_recoveries_latest_date: if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]): live_recoveries.iloc[:,-1:].iloc[0] =", "int(live_cases.iloc[:,-1:].iloc[0]): live_cases.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['cases'] if date_today > live_recoveries_latest_date: print(mohfw_stats['in_stats']['recovered'], int(live_recoveries.iloc[:,-1:].iloc[0])) if mohfw_stats['in_stats']['recovered'] >", "- last_x_tick) < (xt[1] - xt[0])/2: xt.pop(-1) #xt = np.append(xt, last_x_tick) xt.append(last_x_tick) #xtl", "= {'current_time': datetime.now().strftime(\"%B %d, %Y at %I:%M %p\"), 'commit_sha': os.environ['GITHUB_SHA']} state_info = {'link':", "transform=trans, ha=\"right\", va=\"center\") #ax.text(0, deaths_max, color=\"red\", s=deaths_max, transform=trans, ha=\"right\", va=\"center\") ax.text(0.01, cases_max, cases_max,", "jinja2 import Environment, FileSystemLoader from jhu_handler import melt_data, get_jhu_stats, get_india_stats_from_jhu from mohfw_handler import", "a format lineplot likes final_df = melt_data(in_cases_df, in_deaths_df, in_recoveries_df) final_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False)", "mohfw_stats['in_stats']['deaths'] print(live_cases) print(live_deaths) print(live_recoveries) plot_df = melt_data(live_cases, live_deaths, live_recoveries) #plot_df['index'] = plot_df['index'].apply(lambda x:", "resources['Fads, Fake News & Scams'], 'misc': resources['Miscellaneous'], 'commit_info': commit_info_dict, 'state_info': state_info} #,'c_map': map_json", "np.append(xt, last_x_tick) xt.append(last_x_tick) #xtl = xt.tolist() ax.set_xticks(xt) ax.axvline(last_x_tick, ls='dotted', linewidth=0.5) plt.savefig(\"graph.svg\", format='svg', dpi=1200,", "= os.path.join(covid_data_path, 'time_series_covid19_recovered_global.csv') deaths_path = os.path.join(covid_data_path, 'time_series_covid19_deaths_global.csv') Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'timeseries_records')).mkdir(parents=True, exist_ok=True) Path(os.path.join(os.environ['GITHUB_WORKSPACE'],", "Cases', 'Recoveries', 'Deaths'], frameon=False)#loc='upper left' myFmt = DateFormatter(\"%d %b\") #myFmt = DateFormatter(\"%d %b", "print(live_recoveries) plot_df = melt_data(live_cases, live_deaths, live_recoveries) #plot_df['index'] = plot_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y')) plot_df.to_csv(f'./datasets/timeseries_records/live_cases_deaths_recoveries_timeseries.csv',", "melt_data(live_cases, live_deaths, live_recoveries) #plot_df['index'] = plot_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y')) plot_df.to_csv(f'./datasets/timeseries_records/live_cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False)", "== 'cases').max()) deaths_max = int(plot_df['value'].where(plot_df['category'] == 'deaths').max()) recoveries_max = int(plot_df['value'].where(plot_df['category'] == 'recoveries').max()) 
ax.axhline(cases_max,", "commit_info_dict, 'state_info': state_info} #,'c_map': map_json rendered_html = template.render(**namespace) with open(\"index.html\", \"w+\") as f:", "date.today() print(date_today) #check date in index live_cases_latest_date = live_cases.columns[-1] live_recoveries_latest_date = live_recoveries.columns[-1] live_deaths_latest_date", "#ax.set(xticks=final_df['index'].values) ax.grid(color='#f3f3f3', linestyle=':', linewidth=0.5)##cdcdcd #f3f3f3 #D3D3D3 ratio = 0.5 ax.set_aspect(1.0/ax.get_data_ratio()*ratio) plt.xticks(fontsize=5, rotation=0)#, ha='right')", "{'current_time': datetime.now().strftime(\"%B %d, %Y at %I:%M %p\"), 'commit_sha': os.environ['GITHUB_SHA']} state_info = {'link': f\"https://github.com/armsp/covid19.in/blob/master/datasets/statewise_distribution/{str(date.today())}.csv\"}", "ax.tick_params(axis=\"x\", direction='in', length=3, width=0.5) ax.get_yaxis().set_visible(False) ax.spines['bottom'].set_linewidth(0.5) ax.spines['left'].set_linewidth(0.5) #trans = transforms.blended_transform_factory(ax.get_yticklabels()[0].get_transform(), ax.transData) #ax.text(0, cases_max,", "xt.append(last_x_tick) #xtl = xt.tolist() ax.set_xticks(xt) ax.axvline(last_x_tick, ls='dotted', linewidth=0.5) plt.savefig(\"graph.svg\", format='svg', dpi=1200, bbox_inches='tight') #plt.show()", "= DateFormatter(\"%d %b %y\") ax.xaxis.set_major_formatter(myFmt) #ax.set(xticks=final_df['index'].values) ax.grid(color='#f3f3f3', linestyle=':', linewidth=0.5)##cdcdcd #f3f3f3 #D3D3D3 ratio =", "'Recoveries', 'Deaths'], frameon=False)#loc='upper left' myFmt = DateFormatter(\"%d %b\") #myFmt = DateFormatter(\"%d %b %y\")", "Map will use old file even though new data is available\") in_cases_df, in_recoveries_df,", "'statewise_distribution')).mkdir(parents=True, exist_ok=True) mohfw_data_df = mohfw_data_to_df() table_df = extract_clean_df(mohfw_data_df) table_df = add_lat_lon(table_df) #print(\"Table DF\")", "%(filename)s - %(message)s\"), datefmt=\"%d-%b-%Y %I:%M:%S %p\")#, filename='log.txt', filemode='a+' template_loader = FileSystemLoader('./templates') template_env =", "date_today = date.today() print(date_today) #check date in index live_cases_latest_date = live_cases.columns[-1] live_recoveries_latest_date =", "pass data to template stats_dict = {'w_cases': w_confirmed, 'w_deaths': w_deaths, 'w_recovered': w_recovered, 'i_cases':", "write statewise distribution file. 
Map will use old file even though new data", "stats for india if mohfw_stats['in_stats']['cases'] > jhu_stats['in_stats']['cases']: in_cases_greater = mohfw_stats['in_stats']['cases'] else: in_cases_greater =", "> live_recoveries_latest_date: print(mohfw_stats['in_stats']['recovered'], int(live_recoveries.iloc[:,-1:].iloc[0])) if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]): live_recoveries[date_today_str] = mohfw_stats['in_stats']['recovered'] elif date_today", "'timeseries_records')).mkdir(parents=True, exist_ok=True) Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'statewise_distribution')).mkdir(parents=True, exist_ok=True) mohfw_data_df = mohfw_data_to_df() table_df = extract_clean_df(mohfw_data_df)", "lg from pathlib import Path from datetime import date, datetime import yaml import", "Get ready to pass data to template stats_dict = {'w_cases': w_confirmed, 'w_deaths': w_deaths,", "# add clean datasets state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets') print(\"adding clean datasets\") add_clean_state_data(state_data_path)", "plt.title('COVID-19 Cases, Recoveries & Deaths Graph') ax.set(xlabel='Time ->', ylabel='Cases / Deaths') ax.xaxis.label.set_visible(False) ax.yaxis.label.set_visible(False)", "& the Disease'], 'fakes': resources['Fads, Fake News & Scams'], 'misc': resources['Miscellaneous'], 'commit_info': commit_info_dict,", "cases_max, color=\"red\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") ax.text(0.01, deaths_max, deaths_max, color=\"red\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") ax.text(0.01,", "[%(levelname)8s] %(filename)s - %(message)s\"), datefmt=\"%d-%b-%Y %I:%M:%S %p\")#, filename='log.txt', filemode='a+' template_loader = FileSystemLoader('./templates') template_env", "'deaths').max()) recoveries_max = int(plot_df['value'].where(plot_df['category'] == 'recoveries').max()) ax.axhline(cases_max, ls='dotted', linewidth=0.5) ax.axhline(deaths_max, ls='dotted', linewidth=0.5) ax.axhline(recoveries_max,", "get_jhu_stats(covid_daily_reports_path) #Compare JHU Stats with MoHFW stats for india if mohfw_stats['in_stats']['cases'] > jhu_stats['in_stats']['cases']:", "import yaml import requests import pandas as pd import numpy as np import", "'csse_covid_19_data', 'csse_covid_19_time_series') cases_path = os.path.join(covid_data_path, 'time_series_covid19_confirmed_global.csv') recoveries_path = os.path.join(covid_data_path, 'time_series_covid19_recovered_global.csv') deaths_path = os.path.join(covid_data_path,", "else: in_cases_greater = jhu_stats['in_stats']['cases'] if mohfw_stats['in_stats']['deaths'] > jhu_stats['in_stats']['deaths']: in_deaths_greater = mohfw_stats['in_stats']['deaths'] else: in_deaths_greater", "make_chloropleth_json from clean import add_clean_state_data lg.basicConfig(level=lg.DEBUG, format=(\"[%(asctime)s] [%(levelname)8s] %(filename)s - %(message)s\"), datefmt=\"%d-%b-%Y %I:%M:%S", "index=False) ## Using data that is larger live_cases = in_cases_df live_recoveries = in_recoveries_df", "= os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'clean_daily_statewise_distribution') #map_json = make_chloropleth_json(clean_state_data_path) # Get ready to pass", "if mohfw_stats['in_stats']['recovered'] > jhu_stats['in_stats']['recovered']: in_recovered_greater = mohfw_stats['in_stats']['recovered'] else: in_recovered_greater = jhu_stats['in_stats']['recovered'] #world stats", "'safety_resources': 
resources['SAFETY & PREVENTION'], 'about': resources['Virus & the Disease'], 'fakes': resources['Fads, Fake News", "else: in_recovered_greater = jhu_stats['in_stats']['recovered'] #world stats w_confirmed = jhu_stats['w_stats']['cases'] w_deaths = jhu_stats['w_stats']['deaths'] w_recovered", "os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'clean_daily_statewise_distribution') #map_json = make_chloropleth_json(clean_state_data_path) # Get ready to pass data", "print(mohfw_stats['in_stats']['cases'], int(live_cases.iloc[:,-1:].iloc[0])) live_cases[date_today_str] = mohfw_stats['in_stats']['cases']# new column in live with mohfw value elif", "%Y at %I:%M %p\"), 'commit_sha': os.environ['GITHUB_SHA']} state_info = {'link': f\"https://github.com/armsp/covid19.in/blob/master/datasets/statewise_distribution/{str(date.today())}.csv\"} namespace = {'statistics':", "= mohfw_stats['in_stats']['recovered'] elif date_today == live_recoveries_latest_date: if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]): live_recoveries.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['recovered']", "= jhu_stats['in_stats']['deaths'] if mohfw_stats['in_stats']['recovered'] > jhu_stats['in_stats']['recovered']: in_recovered_greater = mohfw_stats['in_stats']['recovered'] else: in_recovered_greater = jhu_stats['in_stats']['recovered']", "date, datetime import yaml import requests import pandas as pd import numpy as", "= add_lat_lon(table_df) #print(\"Table DF\") #print(table_df) if not table_df.empty: table_df.to_csv(f'./datasets/statewise_distribution/{str(date.today())}.csv', sep=',', encoding='utf-8', index=False) else:", "get_mohfw_stats(table_df) print(mohfw_stats) #compare dates live_cases_latest_date = datetime.strptime(live_cases_latest_date, \"%m/%d/%y\").date() live_recoveries_latest_date = datetime.strptime(live_recoveries_latest_date, \"%m/%d/%y\").date() live_deaths_latest_date", "plt.subplots() covid_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_time_series') cases_path = os.path.join(covid_data_path, 'time_series_covid19_confirmed_global.csv') recoveries_path =", "#ax.spines['left'].set_edgecolor('gray') ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() ax.tick_params(axis=\"x\", direction='in', length=3, width=0.5) ax.get_yaxis().set_visible(False) ax.spines['bottom'].set_linewidth(0.5) ax.spines['left'].set_linewidth(0.5) #trans = transforms.blended_transform_factory(ax.get_yticklabels()[0].get_transform(),", "Disease'], 'fakes': resources['Fads, Fake News & Scams'], 'misc': resources['Miscellaneous'], 'commit_info': commit_info_dict, 'state_info': state_info}", "import logging as lg from pathlib import Path from datetime import date, datetime", "length=3, width=0.5) ax.get_yaxis().set_visible(False) ax.spines['bottom'].set_linewidth(0.5) ax.spines['left'].set_linewidth(0.5) #trans = transforms.blended_transform_factory(ax.get_yticklabels()[0].get_transform(), ax.transData) #ax.text(0, cases_max, color=\"red\", s=cases_max,", "format='svg', dpi=1200, bbox_inches='tight') #plt.show() # Make index.html # accquire latest statistics covid_daily_reports_path =", "'cases': 'o'}, ax=ax, **kwargs) # Draw horizontal lines at max values cases_max =", "linewidth=0.5) plt.savefig(\"graph.svg\", format='svg', dpi=1200, bbox_inches='tight') #plt.show() # Make index.html # accquire latest statistics", "FileSystemLoader from jhu_handler import melt_data, get_jhu_stats, get_india_stats_from_jhu from 
mohfw_handler import mohfw_data_to_df, add_lat_lon, get_mohfw_stats,", "glob import json import logging as lg from pathlib import Path from datetime", "ha=\"left\", va=\"bottom\") #ax.annotate(cases_max, [ax.get_xticks()[-1], cases_max], va='bottom', ha='right', color='red') #ax.annotate(deaths_max, [ax.get_xticks()[-1], deaths_max], va='bottom', ha='left',", "exist_ok=True) mohfw_data_df = mohfw_data_to_df() table_df = extract_clean_df(mohfw_data_df) table_df = add_lat_lon(table_df) #print(\"Table DF\") #print(table_df)", "# accquire latest statistics covid_daily_reports_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_daily_reports') jhu_stats = get_jhu_stats(covid_daily_reports_path)", "add_clean_state_data lg.basicConfig(level=lg.DEBUG, format=(\"[%(asctime)s] [%(levelname)8s] %(filename)s - %(message)s\"), datefmt=\"%d-%b-%Y %I:%M:%S %p\")#, filename='log.txt', filemode='a+' template_loader", "os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_daily_reports') jhu_stats = get_jhu_stats(covid_daily_reports_path) #Compare JHU Stats with MoHFW stats", "open('resources.yaml') as fs: resources = yaml.load(fs, yaml.SafeLoader) # add clean datasets state_data_path =", "in_recovered_greater} commit_info_dict = {'current_time': datetime.now().strftime(\"%B %d, %Y at %I:%M %p\"), 'commit_sha': os.environ['GITHUB_SHA']} state_info", "= DateFormatter(\"%d %b\") #myFmt = DateFormatter(\"%d %b %y\") ax.xaxis.set_major_formatter(myFmt) #ax.set(xticks=final_df['index'].values) ax.grid(color='#f3f3f3', linestyle=':', linewidth=0.5)##cdcdcd", "#'-', '--', '-.', ':', 'None', ' ', '', 'solid', 'dashed', 'dashdot', 'dotted' plt.title('COVID-19", "\"template.html\" template = template_env.get_template(TEMPLATE) sns.set(style=\"ticks\") sns.set_context(\"paper\", rc={\"font.size\":8,\"axes.titlesize\":9,\"axes.labelsize\":10,\"lines.linewidth\": 1.5,'lines.markersize':3})#paper,talk,notebook fig, ax = plt.subplots() covid_data_path", "= int(plot_df['value'].where(plot_df['category'] == 'cases').max()) deaths_max = int(plot_df['value'].where(plot_df['category'] == 'deaths').max()) recoveries_max = int(plot_df['value'].where(plot_df['category'] ==", "'dashed', 'dashdot', 'dotted' plt.title('COVID-19 Cases, Recoveries & Deaths Graph') ax.set(xlabel='Time ->', ylabel='Cases /", "#ax.text(0, cases_max, color=\"red\", s=cases_max, transform=trans, ha=\"right\", va=\"center\") #ax.text(0, deaths_max, color=\"red\", s=deaths_max, transform=trans, ha=\"right\",", "[ax.get_xticks()[-1], deaths_max], va='bottom', ha='left', color='red') xt = ax.get_xticks().tolist() last_x_tick = date2num(plot_df['index'].values[-1]) if xt[-1]", "= jhu_stats['in_stats']['cases'] if mohfw_stats['in_stats']['deaths'] > jhu_stats['in_stats']['deaths']: in_deaths_greater = mohfw_stats['in_stats']['deaths'] else: in_deaths_greater = jhu_stats['in_stats']['deaths']", "(xt[1] - xt[0])/2: xt.pop(-1) #xt = np.append(xt, last_x_tick) xt.append(last_x_tick) #xtl = xt.tolist() ax.set_xticks(xt)", "FileSystemLoader('./templates') template_env = Environment(loader=template_loader) TEMPLATE = \"template.html\" template = template_env.get_template(TEMPLATE) sns.set(style=\"ticks\") sns.set_context(\"paper\", rc={\"font.size\":8,\"axes.titlesize\":9,\"axes.labelsize\":10,\"lines.linewidth\":", "statistics covid_daily_reports_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_daily_reports') jhu_stats = 
get_jhu_stats(covid_daily_reports_path) #Compare JHU Stats", "recoveries_max, color=\"green\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") #ax.annotate(cases_max, [ax.get_xticks()[-1], cases_max], va='bottom', ha='right', color='red') #ax.annotate(deaths_max, [ax.get_xticks()[-1],", "from clean import add_clean_state_data lg.basicConfig(level=lg.DEBUG, format=(\"[%(asctime)s] [%(levelname)8s] %(filename)s - %(message)s\"), datefmt=\"%d-%b-%Y %I:%M:%S %p\")#,", "deaths_path = os.path.join(covid_data_path, 'time_series_covid19_deaths_global.csv') Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'timeseries_records')).mkdir(parents=True, exist_ok=True) Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'statewise_distribution')).mkdir(parents=True,", "#xt = np.append(xt, last_x_tick) xt.append(last_x_tick) #xtl = xt.tolist() ax.set_xticks(xt) ax.axvline(last_x_tick, ls='dotted', linewidth=0.5) plt.savefig(\"graph.svg\",", "if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]): print(mohfw_stats['in_stats']['cases'], int(live_cases.iloc[:,-1:].iloc[0])) live_cases[date_today_str] = mohfw_stats['in_stats']['cases']# new column in live", "va='bottom', ha='right', color='red') #ax.annotate(deaths_max, [ax.get_xticks()[-1], deaths_max], va='bottom', ha='left', color='red') xt = ax.get_xticks().tolist() last_x_tick", "date2num(plot_df['index'].values[-1]) if xt[-1] > last_x_tick: xt.pop(-1) else: if abs(xt[-1] - last_x_tick) < (xt[1]", "matplotlib.pyplot as plt from matplotlib.dates import date2num, DateFormatter import matplotlib.transforms as transforms from", "covid_daily_reports_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_daily_reports') jhu_stats = get_jhu_stats(covid_daily_reports_path) #Compare JHU Stats with", "with MoHFW stats for india if mohfw_stats['in_stats']['cases'] > jhu_stats['in_stats']['cases']: in_cases_greater = mohfw_stats['in_stats']['cases'] else:", "from matplotlib.dates import date2num, DateFormatter import matplotlib.transforms as transforms from jinja2 import Environment,", "'covid19-in', 'datasets', 'clean_daily_statewise_distribution') #map_json = make_chloropleth_json(clean_state_data_path) # Get ready to pass data to", "'commit_sha': os.environ['GITHUB_SHA']} state_info = {'link': f\"https://github.com/armsp/covid19.in/blob/master/datasets/statewise_distribution/{str(date.today())}.csv\"} namespace = {'statistics': stats_dict, 'safety_resources': resources['SAFETY &", "= date2num(plot_df['index'].values[-1]) if xt[-1] > last_x_tick: xt.pop(-1) else: if abs(xt[-1] - last_x_tick) <", "'covid19-in', 'datasets') print(\"adding clean datasets\") add_clean_state_data(state_data_path) #clean_state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'clean_daily_statewise_distribution') #map_json", "if date_today > live_deaths_latest_date: if mohfw_stats['in_stats']['deaths'] > int(live_deaths.iloc[:,-1:].iloc[0]): live_deaths[date_today_str] = mohfw_stats['in_stats']['deaths'] elif date_today", "= jhu_stats['in_stats']['recovered'] #world stats w_confirmed = jhu_stats['w_stats']['cases'] w_deaths = jhu_stats['w_stats']['deaths'] w_recovered = jhu_stats['w_stats']['recovered']", "template stats_dict = {'w_cases': w_confirmed, 'w_deaths': w_deaths, 'w_recovered': w_recovered, 'i_cases': in_cases_greater, 'i_deaths': in_deaths_greater", "dates live_cases_latest_date = 
datetime.strptime(live_cases_latest_date, \"%m/%d/%y\").date() live_recoveries_latest_date = datetime.strptime(live_recoveries_latest_date, \"%m/%d/%y\").date() live_deaths_latest_date = datetime.strptime(live_deaths_latest_date, \"%m/%d/%y\").date()", "ax.legend(labels=['Confirmed Cases', 'Recoveries', 'Deaths'], frameon=False)#loc='upper left' myFmt = DateFormatter(\"%d %b\") #myFmt = DateFormatter(\"%d", "clean datasets state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets') print(\"adding clean datasets\") add_clean_state_data(state_data_path) #clean_state_data_path =", "statewise distribution file. Map will use old file even though new data is", "mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]): print(mohfw_stats['in_stats']['cases'], int(live_cases.iloc[:,-1:].iloc[0])) live_cases[date_today_str] = mohfw_stats['in_stats']['cases']# new column in live with", "jhu_stats['w_stats']['cases'] w_deaths = jhu_stats['w_stats']['deaths'] w_recovered = jhu_stats['w_stats']['recovered'] ## read resource yaml with open('resources.yaml')", "Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'statewise_distribution')).mkdir(parents=True, exist_ok=True) mohfw_data_df = mohfw_data_to_df() table_df = extract_clean_df(mohfw_data_df) table_df =", "even though new data is available\") in_cases_df, in_recoveries_df, in_deaths_df = get_india_stats_from_jhu(cases_path, recoveries_path, deaths_path)", "#world stats w_confirmed = jhu_stats['w_stats']['cases'] w_deaths = jhu_stats['w_stats']['deaths'] w_recovered = jhu_stats['w_stats']['recovered'] ## read", "frameon=False)#loc='upper left' myFmt = DateFormatter(\"%d %b\") #myFmt = DateFormatter(\"%d %b %y\") ax.xaxis.set_major_formatter(myFmt) #ax.set(xticks=final_df['index'].values)", "'%m/%d/%y')) jhu_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) # Make plot ax = plt.axes() kwargs =", "import seaborn as sns import matplotlib.pyplot as plt from matplotlib.dates import date2num, DateFormatter", "add_lat_lon(table_df) #print(\"Table DF\") #print(table_df) if not table_df.empty: table_df.to_csv(f'./datasets/statewise_distribution/{str(date.today())}.csv', sep=',', encoding='utf-8', index=False) else: lg.warning(\"Failed", "'d', 'cases': 'o'}, ax=ax, **kwargs) # Draw horizontal lines at max values cases_max", "plot_df.to_csv(f'./datasets/timeseries_records/live_cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) jhu_df = melt_data(in_cases_df, in_deaths_df, in_recoveries_df) #jhu_df['index'] = jhu_df['index'].apply(lambda x:", "style='category', palette={'cases': 'Red', 'recoveries': 'Green', 'deaths': 'Gray'}, dashes=False, data=plot_df, markers={'deaths': 'X', 'recoveries': 'd',", "ha='right', color='red') #ax.annotate(deaths_max, [ax.get_xticks()[-1], deaths_max], va='bottom', ha='left', color='red') xt = ax.get_xticks().tolist() last_x_tick =", "'datasets') print(\"adding clean datasets\") add_clean_state_data(state_data_path) #clean_state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'clean_daily_statewise_distribution') #map_json =", "mohfw_stats['in_stats']['recovered'] > jhu_stats['in_stats']['recovered']: in_recovered_greater = mohfw_stats['in_stats']['recovered'] else: in_recovered_greater = jhu_stats['in_stats']['recovered'] #world stats w_confirmed", "{'markeredgewidth': 0.25} sns.lineplot(x='index', y='value', hue='category', 
hue_order=['cases', 'recoveries', 'deaths'], style='category', palette={'cases': 'Red', 'recoveries': 'Green',", "data is available\") in_cases_df, in_recoveries_df, in_deaths_df = get_india_stats_from_jhu(cases_path, recoveries_path, deaths_path) # Transforming data", "cases_max = int(plot_df['value'].where(plot_df['category'] == 'cases').max()) deaths_max = int(plot_df['value'].where(plot_df['category'] == 'deaths').max()) recoveries_max = int(plot_df['value'].where(plot_df['category']", "table_df.empty: table_df.to_csv(f'./datasets/statewise_distribution/{str(date.today())}.csv', sep=',', encoding='utf-8', index=False) else: lg.warning(\"Failed to write statewise distribution file. Map", "live_cases_latest_date = datetime.strptime(live_cases_latest_date, \"%m/%d/%y\").date() live_recoveries_latest_date = datetime.strptime(live_recoveries_latest_date, \"%m/%d/%y\").date() live_deaths_latest_date = datetime.strptime(live_deaths_latest_date, \"%m/%d/%y\").date() print(live_cases_latest_date,", "xt[-1] > last_x_tick: xt.pop(-1) else: if abs(xt[-1] - last_x_tick) < (xt[1] - xt[0])/2:", "jhu_stats['in_stats']['deaths']: in_deaths_greater = mohfw_stats['in_stats']['deaths'] else: in_deaths_greater = jhu_stats['in_stats']['deaths'] if mohfw_stats['in_stats']['recovered'] > jhu_stats['in_stats']['recovered']: in_recovered_greater", "in_deaths_greater , 'i_recovered': in_recovered_greater} commit_info_dict = {'current_time': datetime.now().strftime(\"%B %d, %Y at %I:%M %p\"),", "xt.pop(-1) else: if abs(xt[-1] - last_x_tick) < (xt[1] - xt[0])/2: xt.pop(-1) #xt =", "Environment(loader=template_loader) TEMPLATE = \"template.html\" template = template_env.get_template(TEMPLATE) sns.set(style=\"ticks\") sns.set_context(\"paper\", rc={\"font.size\":8,\"axes.titlesize\":9,\"axes.labelsize\":10,\"lines.linewidth\": 1.5,'lines.markersize':3})#paper,talk,notebook fig, ax", "import os import glob import json import logging as lg from pathlib import", "s=cases_max, transform=trans, ha=\"right\", va=\"center\") #ax.text(0, deaths_max, color=\"red\", s=deaths_max, transform=trans, ha=\"right\", va=\"center\") ax.text(0.01, cases_max,", "os.path.join(covid_data_path, 'time_series_covid19_deaths_global.csv') Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'timeseries_records')).mkdir(parents=True, exist_ok=True) Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'statewise_distribution')).mkdir(parents=True, exist_ok=True) mohfw_data_df", "live_deaths, live_recoveries) #plot_df['index'] = plot_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y')) plot_df.to_csv(f'./datasets/timeseries_records/live_cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) jhu_df", "for india if mohfw_stats['in_stats']['cases'] > jhu_stats['in_stats']['cases']: in_cases_greater = mohfw_stats['in_stats']['cases'] else: in_cases_greater = jhu_stats['in_stats']['cases']", "%p\")#, filename='log.txt', filemode='a+' template_loader = FileSystemLoader('./templates') template_env = Environment(loader=template_loader) TEMPLATE = \"template.html\" template", "print(date_today) #check date in index live_cases_latest_date = live_cases.columns[-1] live_recoveries_latest_date = live_recoveries.columns[-1] live_deaths_latest_date =", "#myFmt = DateFormatter(\"%d %b %y\") ax.xaxis.set_major_formatter(myFmt) #ax.set(xticks=final_df['index'].values) ax.grid(color='#f3f3f3', linestyle=':', linewidth=0.5)##cdcdcd #f3f3f3 #D3D3D3 ratio", "#ax.annotate(deaths_max, 
[ax.get_xticks()[-1], deaths_max], va='bottom', ha='left', color='red') xt = ax.get_xticks().tolist() last_x_tick = date2num(plot_df['index'].values[-1]) if", "color=\"green\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") #ax.annotate(cases_max, [ax.get_xticks()[-1], cases_max], va='bottom', ha='right', color='red') #ax.annotate(deaths_max, [ax.get_xticks()[-1], deaths_max],", "as pd import numpy as np import seaborn as sns import matplotlib.pyplot as", "date.today().strftime(\"%-m/%-d/%y\") print(f\"Today's date is = {date_today_str}\") date_today = date.today() print(date_today) #check date in", "pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt", "Make plot ax = plt.axes() kwargs = {'markeredgewidth': 0.25} sns.lineplot(x='index', y='value', hue='category', hue_order=['cases',", "recoveries_max = int(plot_df['value'].where(plot_df['category'] == 'recoveries').max()) ax.axhline(cases_max, ls='dotted', linewidth=0.5) ax.axhline(deaths_max, ls='dotted', linewidth=0.5) ax.axhline(recoveries_max, ls='dotted',", "fig, ax = plt.subplots() covid_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_time_series') cases_path = os.path.join(covid_data_path,", "ha=\"left\", va=\"bottom\") ax.text(0.01, recoveries_max, recoveries_max, color=\"green\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") #ax.annotate(cases_max, [ax.get_xticks()[-1], cases_max], va='bottom',", "'datasets', 'statewise_distribution')).mkdir(parents=True, exist_ok=True) mohfw_data_df = mohfw_data_to_df() table_df = extract_clean_df(mohfw_data_df) table_df = add_lat_lon(table_df) #print(\"Table", "date_today > live_deaths_latest_date: if mohfw_stats['in_stats']['deaths'] > int(live_deaths.iloc[:,-1:].iloc[0]): live_deaths[date_today_str] = mohfw_stats['in_stats']['deaths'] elif date_today ==", "index=False) # Make plot ax = plt.axes() kwargs = {'markeredgewidth': 0.25} sns.lineplot(x='index', y='value',", "%(message)s\"), datefmt=\"%d-%b-%Y %I:%M:%S %p\")#, filename='log.txt', filemode='a+' template_loader = FileSystemLoader('./templates') template_env = Environment(loader=template_loader) TEMPLATE", "= {'statistics': stats_dict, 'safety_resources': resources['SAFETY & PREVENTION'], 'about': resources['Virus & the Disease'], 'fakes':", "as lg from pathlib import Path from datetime import date, datetime import yaml", "last_x_tick = date2num(plot_df['index'].values[-1]) if xt[-1] > last_x_tick: xt.pop(-1) else: if abs(xt[-1] - last_x_tick)", "int(live_recoveries.iloc[:,-1:].iloc[0]): live_recoveries[date_today_str] = mohfw_stats['in_stats']['recovered'] elif date_today == live_recoveries_latest_date: if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]): live_recoveries.iloc[:,-1:].iloc[0]", "mohfw_data_df = mohfw_data_to_df() table_df = extract_clean_df(mohfw_data_df) table_df = add_lat_lon(table_df) #print(\"Table DF\") #print(table_df) if", "as np import seaborn as sns import matplotlib.pyplot as plt from matplotlib.dates import", "live_recoveries) #plot_df['index'] = plot_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y')) plot_df.to_csv(f'./datasets/timeseries_records/live_cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) jhu_df =", "= jhu_stats['w_stats']['deaths'] w_recovered = jhu_stats['w_stats']['recovered'] ## read resource yaml with open('resources.yaml') as fs:", "lg.basicConfig(level=lg.DEBUG, format=(\"[%(asctime)s] [%(levelname)8s] 
%(filename)s - %(message)s\"), datefmt=\"%d-%b-%Y %I:%M:%S %p\")#, filename='log.txt', filemode='a+' template_loader =", "= jhu_stats['w_stats']['cases'] w_deaths = jhu_stats['w_stats']['deaths'] w_recovered = jhu_stats['w_stats']['recovered'] ## read resource yaml with", "live_deaths_latest_date: if mohfw_stats['in_stats']['deaths'] > int(live_deaths.iloc[:,-1:].iloc[0]): live_deaths[date_today_str] = mohfw_stats['in_stats']['deaths'] elif date_today == live_deaths_latest_date: if", "Deaths Graph') ax.set(xlabel='Time ->', ylabel='Cases / Deaths') ax.xaxis.label.set_visible(False) ax.yaxis.label.set_visible(False) ax.legend(labels=['Confirmed Cases', 'Recoveries', 'Deaths'],", "live_cases[date_today_str] = mohfw_stats['in_stats']['cases']# new column in live with mohfw value elif date_today ==", "datetime.strptime(x, '%m/%d/%y')) jhu_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) # Make plot ax = plt.axes() kwargs", "%y\") ax.xaxis.set_major_formatter(myFmt) #ax.set(xticks=final_df['index'].values) ax.grid(color='#f3f3f3', linestyle=':', linewidth=0.5)##cdcdcd #f3f3f3 #D3D3D3 ratio = 0.5 ax.set_aspect(1.0/ax.get_data_ratio()*ratio) plt.xticks(fontsize=5,", "Stats with MoHFW stats for india if mohfw_stats['in_stats']['cases'] > jhu_stats['in_stats']['cases']: in_cases_greater = mohfw_stats['in_stats']['cases']", "live_cases.columns[-1] live_recoveries_latest_date = live_recoveries.columns[-1] live_deaths_latest_date = live_deaths.columns[-1] #get today's stats from mohfw mohfw_stats", "datefmt=\"%d-%b-%Y %I:%M:%S %p\")#, filename='log.txt', filemode='a+' template_loader = FileSystemLoader('./templates') template_env = Environment(loader=template_loader) TEMPLATE =", "> last_x_tick: xt.pop(-1) else: if abs(xt[-1] - last_x_tick) < (xt[1] - xt[0])/2: xt.pop(-1)", "sep=',', encoding='utf-8', index=False) else: lg.warning(\"Failed to write statewise distribution file. 
Map will use", "#check date in index live_cases_latest_date = live_cases.columns[-1] live_recoveries_latest_date = live_recoveries.columns[-1] live_deaths_latest_date = live_deaths.columns[-1]", "'deaths'], style='category', palette={'cases': 'Red', 'recoveries': 'Green', 'deaths': 'Gray'}, dashes=False, data=plot_df, markers={'deaths': 'X', 'recoveries':", "int(plot_df['value'].where(plot_df['category'] == 'cases').max()) deaths_max = int(plot_df['value'].where(plot_df['category'] == 'deaths').max()) recoveries_max = int(plot_df['value'].where(plot_df['category'] == 'recoveries').max())", "'time_series_covid19_recovered_global.csv') deaths_path = os.path.join(covid_data_path, 'time_series_covid19_deaths_global.csv') Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'timeseries_records')).mkdir(parents=True, exist_ok=True) Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets',", "= os.path.join(covid_data_path, 'time_series_covid19_deaths_global.csv') Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'timeseries_records')).mkdir(parents=True, exist_ok=True) Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'statewise_distribution')).mkdir(parents=True, exist_ok=True)", "get_india_stats_from_jhu(cases_path, recoveries_path, deaths_path) # Transforming data to a format lineplot likes final_df =", "jhu_stats['in_stats']['deaths'] if mohfw_stats['in_stats']['recovered'] > jhu_stats['in_stats']['recovered']: in_recovered_greater = mohfw_stats['in_stats']['recovered'] else: in_recovered_greater = jhu_stats['in_stats']['recovered'] #world", "datetime.strptime(x, '%m/%d/%y')) plot_df.to_csv(f'./datasets/timeseries_records/live_cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) jhu_df = melt_data(in_cases_df, in_deaths_df, in_recoveries_df) #jhu_df['index'] =", "mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]): live_cases.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['cases'] if date_today > live_recoveries_latest_date: print(mohfw_stats['in_stats']['recovered'], int(live_recoveries.iloc[:,-1:].iloc[0])) if", "jhu_stats['in_stats']['recovered']: in_recovered_greater = mohfw_stats['in_stats']['recovered'] else: in_recovered_greater = jhu_stats['in_stats']['recovered'] #world stats w_confirmed = jhu_stats['w_stats']['cases']", "likes final_df = melt_data(in_cases_df, in_deaths_df, in_recoveries_df) final_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False) ## Using data", "mohfw_stats['in_stats']['deaths'] > int(live_deaths.iloc[:,-1:].iloc[0]): live_deaths.iloc[:,-1:].iloc[0] = mohfw_stats['in_stats']['deaths'] print(live_cases) print(live_deaths) print(live_recoveries) plot_df = melt_data(live_cases, live_deaths,", "yaml with open('resources.yaml') as fs: resources = yaml.load(fs, yaml.SafeLoader) # add clean datasets", "file even though new data is available\") in_cases_df, in_recoveries_df, in_deaths_df = get_india_stats_from_jhu(cases_path, recoveries_path,", "in_deaths_greater = jhu_stats['in_stats']['deaths'] if mohfw_stats['in_stats']['recovered'] > jhu_stats['in_stats']['recovered']: in_recovered_greater = mohfw_stats['in_stats']['recovered'] else: in_recovered_greater =", "ax.set_xticks(xt) ax.axvline(last_x_tick, ls='dotted', linewidth=0.5) plt.savefig(\"graph.svg\", format='svg', dpi=1200, bbox_inches='tight') #plt.show() # Make index.html #", "print(live_cases) 
print(live_deaths) print(live_recoveries) plot_df = melt_data(live_cases, live_deaths, live_recoveries) #plot_df['index'] = plot_df['index'].apply(lambda x: datetime.strptime(x,", "horizontal lines at max values cases_max = int(plot_df['value'].where(plot_df['category'] == 'cases').max()) deaths_max = int(plot_df['value'].where(plot_df['category']", "->', ylabel='Cases / Deaths') ax.xaxis.label.set_visible(False) ax.yaxis.label.set_visible(False) ax.legend(labels=['Confirmed Cases', 'Recoveries', 'Deaths'], frameon=False)#loc='upper left' myFmt", "yaml.SafeLoader) # add clean datasets state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets') print(\"adding clean datasets\")", "datasets\") add_clean_state_data(state_data_path) #clean_state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'clean_daily_statewise_distribution') #map_json = make_chloropleth_json(clean_state_data_path) # Get", "mohfw_stats['in_stats']['recovered'] if date_today > live_deaths_latest_date: if mohfw_stats['in_stats']['deaths'] > int(live_deaths.iloc[:,-1:].iloc[0]): live_deaths[date_today_str] = mohfw_stats['in_stats']['deaths'] elif", "= {'markeredgewidth': 0.25} sns.lineplot(x='index', y='value', hue='category', hue_order=['cases', 'recoveries', 'deaths'], style='category', palette={'cases': 'Red', 'recoveries':", "distribution file. Map will use old file even though new data is available\")", "if mohfw_stats['in_stats']['deaths'] > jhu_stats['in_stats']['deaths']: in_deaths_greater = mohfw_stats['in_stats']['deaths'] else: in_deaths_greater = jhu_stats['in_stats']['deaths'] if mohfw_stats['in_stats']['recovered']", "'clean_daily_statewise_distribution') #map_json = make_chloropleth_json(clean_state_data_path) # Get ready to pass data to template stats_dict", "live_deaths_latest_date = datetime.strptime(live_deaths_latest_date, \"%m/%d/%y\").date() print(live_cases_latest_date, live_recoveries_latest_date, live_deaths_latest_date) if date_today > live_cases_latest_date: if mohfw_stats['in_stats']['cases']", "'Gray'}, dashes=False, data=plot_df, markers={'deaths': 'X', 'recoveries': 'd', 'cases': 'o'}, ax=ax, **kwargs) # Draw", "to write statewise distribution file. Map will use old file even though new", "color=\"red\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") ax.text(0.01, deaths_max, deaths_max, color=\"red\", transform=ax.get_yaxis_transform(), ha=\"left\", va=\"bottom\") ax.text(0.01, recoveries_max,", "mohfw_stats = get_mohfw_stats(table_df) print(mohfw_stats) #compare dates live_cases_latest_date = datetime.strptime(live_cases_latest_date, \"%m/%d/%y\").date() live_recoveries_latest_date = datetime.strptime(live_recoveries_latest_date," ]
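# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): `melt_data` is
# imported from jhu_handler above and its implementation is not shown in this
# document. Judging from how plot_df is consumed (sns.lineplot on x='index',
# y='value', hue='category', plus date2num/DateFormatter on the 'index'
# column), it presumably reshapes the three wide date-column frames into one
# long frame. A minimal sketch under that assumption; the real implementation
# may differ:

import pandas as pd

def melt_data_sketch(cases_df, deaths_df, recoveries_df):
    """Hypothetical stand-in for jhu_handler.melt_data (illustration only)."""
    frames = []
    for category, df in (('cases', cases_df), ('deaths', deaths_df), ('recoveries', recoveries_df)):
        melted = df.melt(var_name='index', value_name='value')  # one row per date column
        melted['category'] = category
        frames.append(melted)
    long_df = pd.concat(frames, ignore_index=True)
    # the plotting code treats 'index' as datetimes, so parse the m/d/yy labels
    long_df['index'] = pd.to_datetime(long_df['index'], format='%m/%d/%y')
    return long_df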
import os
import sys
import itertools
import numpy as np
from color_schemes import color_func


def get_term_size():
    rows, columns = os.popen('stty size', 'r').read().split()
    return int(rows), int(columns)


class Pos:
    def __init__(self, row, col):
        self.row = row
        self.col = col

    def __add__(self, pos):
        return Pos(self.row + pos.row, self.col + pos.col)

    def __mul__(self, pos_time):
        if type(pos_time) is tuple:
            return Pos(self.row * pos_time[0], self.col * pos_time[1])
        return Pos(self.row * pos_time.row, self.col * pos_time.col)

    def __str__(self):
        return "{%d, %d}" % (self.row, self.col)


class Color:
    def __init__(self, r, g, b):
        self.r = int(r)
        self.g = int(g)
        self.b = int(b)

    def __add__(self, inc):
        if type(inc) == tuple and len(inc) == 3:
            return Color(self.r + inc[0], self.g + inc[1], self.b + inc[2])
        elif type(inc) == Color:
            return Color(self.r + inc.r, self.g + inc.g, self.b + inc.b)
        elif type(inc) == int:
            return Color(self.r + inc, self.g + inc, self.b + inc)
        else:
            raise TypeError("operand type must be either 3-tuple, Color or int")

    def __mul__(self, inc):
        if type(inc) == tuple and len(inc) == 3:
            return Color(self.r * inc[0], self.g * inc[1], self.b * inc[2])
        elif type(inc) in (float, int):
            return Color(int(self.r * inc), int(self.g * inc), int(self.b * inc))
        else:
            raise TypeError("operand type must be either 3-tuple or a number")

    def __str__(self):
        return "{%d, %d, %d}" % (self.r, self.g, self.b)


class CharColor:
    def __init__(self, fore, back=None):
        # both operands accept either a Color or a 3-tuple of RGB values
        if type(fore) == tuple and len(fore) == 3:
            self.fore = Color(*fore)
        else:
            self.fore = fore
        if back is None:
            self.back = Color(0, 0, 0)
        elif type(back) == tuple and len(back) == 3:
            self.back = Color(*back)
        else:
            self.back = back

    def __add__(self, inc):
        if type(inc) == tuple:
            if len(inc) == 2:
                return CharColor(self.fore + inc[0], self.back + inc[1])
            elif len(inc) == 3:
                return CharColor(self.fore + inc, self.back + inc)
            else:
                raise TypeError("operand type must be either 3-tuple or 2-tuple")
        elif type(inc) is int:
            return CharColor(self.fore + inc, self.back + inc)
        else:
            raise TypeError("operand type must be tuple or int")

    def __mul__(self, inc):
        if type(inc) == tuple:
            if len(inc) == 2:
                return CharColor(self.fore * inc[0], self.back * inc[1])
            elif len(inc) == 3:
                return CharColor(self.fore * inc, self.back * inc)
            else:
                raise TypeError("operand type must be either 3-tuple or 2-tuple")
        elif type(inc) is float:
            return CharColor(self.fore * inc, self.back * inc)
        else:
            raise TypeError("operand type must be tuple or float")

    def __str__(self):
        return str(self.fore) + " " + str(self.back)


class Rect:
    """ Rect: Draw a rectangle area with given fore/back color and text
    content; records position, size, color and content only. """
    def __init__(self, pos, color, text):
        self.pos = pos
        self.color = color
        self.text = text


class Canvas:
    rows, cols = get_term_size()
    # graphic elements held by Canvas
    elems = []  # for successively adding elements
    current_line = 0

    def add_text(self, text, color, anchor=None):
        if anchor is None:
            anchor = Pos(self.current_line, (self.cols - len(text)) // 2)
        color = color * (0.5, 1.)
        self.add_empty_line(anchor)
        self.elems.append(Rect(anchor, color, text))
        self.current_line += 2

    def add_empty_line(self, pos):
        self.elems.append(Rect(Pos(pos.row, 0), CharColor((0, 0, 0)), " " * self.cols))

    def add_frame(self, size, anchor, sides=("left", "right", "top", "bottom"),
                  x_tick_range=None, y_tick_range=None,
                  x_rep=None, y_rep=None, x_off=None, y_off=None):
        color = CharColor((255, 255, 255))
        for l in range(size.row + 1):
            self.add_empty_line(Pos(l, 0) + anchor)
            tick_char = u"│"
            if x_off is not None and x_rep is not None:
                if (l + x_off) % x_rep == 0:
                    tick_char = u"├"
            if "left" in sides:
                self.elems.append(Rect(Pos(l, 0) + anchor, color, tick_char))
            if "right" in sides:
                self.elems.append(Rect(Pos(l, size.col) + anchor, color, u"│"))
        for l in range(1, size.col):
            tick_char = u"─"
            if y_off is not None and y_rep is not None:
                if (l + y_off) % y_rep == 0:
                    tick_char = u"┴"
            if "top" in sides:
                self.elems.append(Rect(Pos(0, l) + anchor, color, u"─"))
            if "bottom" in sides:
                self.elems.append(Rect(Pos(size.row, l) + anchor, color, tick_char))
        # frame corners
        self.elems.append(Rect(anchor, color, u"┌"))
        self.elems.append(Rect(anchor + Pos(size.row, 0), color, u"└"))
        self.elems.append(Rect(anchor + Pos(size.row, size.col), color, u"┘"))
        self.elems.append(Rect(anchor + Pos(0, size.col), color, u"┐"))

    def add_grid(self, table, color_func, anchor=None):
        min_cell = min([min(c) for c in table])
        max_cell = max([max(c) for c in table])
        # Get the reformatted string for every cell and calculate the max length
        cell_size = 0
        for row in table:
            for cell in row:
                try:
                    new_cell = "%1.2f" % cell
                except TypeError:
                    new_cell = cell
                if cell_size < len(new_cell):
                    cell_size = len(new_cell)
        cell_size += 2
        if anchor is None:
            anchor = Pos(self.current_line, (self.cols - len(table[0]) * cell_size - 7) // 3)
        self.add_frame(Pos(len(table) * 3 + 3, len(table[0]) * cell_size + 5), anchor,
                       x_rep=3, x_off=0, y_rep=cell_size, y_off=0)

        def add_cell(cell, anchor, pos, isBlank=False):
            pos = pos * (1, cell_size) + anchor
            back = color_func((cell - min_cell) / (max_cell - min_cell))
            color = CharColor(back, back) * (1., 0.5)
            cell = "" if isBlank else ("%1.2f" % cell if type(cell) is float else cell)
            cell = cell.rjust(cell_size - 2)
            self.elems.append(Rect(pos, color, " " + cell + " "))

        # Add each cell into the element table; every table row takes three
        # screen lines (blank, value, blank) so cells stay roughly square
        cell_anchor = anchor + Pos(2, 1)
        for [row_num, row] in enumerate(table):
            for [col_num, cell] in enumerate(row):
                add_cell(cell, cell_anchor, Pos(row_num * 3 + 0, col_num), True)
                add_cell(cell, cell_anchor, Pos(row_num * 3 + 1, col_num))
                add_cell(cell, cell_anchor, Pos(row_num * 3 + 2, col_num), True)

        # Add a thermometer (color scale) on the right side
        thermo_left = len(table[0]) * cell_size + 10
        for line in range(1, len(table) * 3 + 3):
            pos = Pos(line, thermo_left) + anchor
            back = color_func(float(line) / (len(table) * 3 + 3))
            color = CharColor(Color(0, 0, 0), back)
            self.elems.append(Rect(pos, color, " "))
        self.current_line += len(table) * 3 + 4

    def add_hist(self, hist, color_func, anchor=None):
        max_val = max(hist[0])
        height = 30
        bar_width = 5
        if anchor is None:
            anchor = Pos(self.current_line, (self.cols - len(hist[0]) * bar_width) // 2)
        self.add_frame(Pos(height + 3, len(hist[0]) * bar_width + 5), anchor,
                       x_rep=3, x_off=0, y_rep=bar_width, y_off=0)
        hist_anchor = anchor + Pos(2, 3)
        for line in range(height):
            for ith, val in enumerate(hist[0]):
                pos = Pos(line, ith * bar_width) + hist_anchor
                if height * (1 - val / max_val) < line:
                    color = color_func(val / max_val)
                    self.elems.append(Rect(pos, CharColor(color, color * 2), " "))
        self.current_line += 30

    def render_line(self, line_num, is_reset=False):
        """ render elements in a single line """
        # Find all elements to be rendered in the current line
        elems_inline = [elem for elem in self.elems if elem.pos.row == line_num]
        visible_parts = []

        def visible_check(A_bound, B_bound):
            # compare the left/right bound of the new element (A) with an
            # existing bound (B)
            A_left, A_right, _ = A_bound
            B_left, B_right, B_id = B_bound
            A_left_shaded = A_left <= B_left
            A_right_shaded = A_right >= B_right
            A_left_dodged = A_right < B_left
            A_right_dodged = A_left > B_right
            # Four cases of shading:
            # 1. dodged: the fore and back elements don't overlap
            # 2. shaded: fore element shades the left or right bound of the
            #    back element
            # 3. split: fore element falls inside the back element and splits
            #    it in two
            # 4. fully shaded: fore element covers the back element entirely
            if A_left_dodged or A_right_dodged:
                # dodged
                return ((B_left, B_right, B_id),)
            elif not (A_left_shaded or A_right_shaded):
                # split
                return ((B_left, A_left, B_id), (A_right, B_right, B_id))
            elif A_left_shaded and A_right_shaded:
                # fully shaded
                return []
            else:
                # partially shaded
                if A_left_shaded:
                    return ((A_right, B_right, B_id),)
                return ((B_left, A_left, B_id),)

        for elem_i, elem in enumerate(elems_inline):
            elem_bound = (elem.pos.col, elem.pos.col + len(elem.text), elem_i)
            for i, part in enumerate(visible_parts):
                visible_parts[i] = visible_check(elem_bound, part)
            # Since visible_check returns a tuple of tuples, wrap the new
            # bound with one more tuple in order to maintain the form, then
            # flatten the list with itertools
            visible_parts.append((elem_bound,))
            visible_parts = list(itertools.chain(*visible_parts))
        visible_parts = sorted(visible_parts, key=lambda x: x[0])
        # handles if no elements in this line
        strokes = "" if visible_parts == [] else " " * visible_parts[0][0]
        COLOR_RESET = '\x01\x1b[0m\x02'
        for part in visible_parts:
            elem = elems_inline[part[2]]
            color = elem.color
            text = elem.text[part[0] - elem.pos.col : part[1] - elem.pos.col]
            strokes += self.stroke(text, color)
        strokes += COLOR_RESET if is_reset else ""
        sys.stdout.write(strokes + COLOR_RESET)
        sys.stdout.write("\n")

    def render(self, is_reset=False):
        sys.stdout.flush()
        sys.stdout.write("\n")
        for line in range(self.rows):
            self.render_line(line, is_reset)

    def stroke(self, text, c):
        # 38/48 select 24-bit foreground/background in an ANSI SGR sequence
        COLOR_FORE = 38
        COLOR_BACK = 48
        color_seq = '\x01\x1b[{z};2;{r};{g};{b}m\x02'
        fore = color_seq.format(z=COLOR_FORE, r=c.fore.r, g=c.fore.g, b=c.fore.b)
        back = color_seq.format(z=COLOR_BACK, r=c.back.r, g=c.back.g, b=c.back.b)
        return fore + back + text


if __name__ == "__main__":
    c = Canvas()
    grid = np.random.random_sample((7, 10))
    hist = np.random.random_sample((15, 1))
    c.add_text("This is a heatmap example", CharColor(color_func["Plum"](0.9)))
    c.add_grid(grid.tolist(), color_func["Plum"])
    c.add_text("This is a histogram example", CharColor(color_func["BlueGreenYellow"](0.9)))
    #c.add_hist(grid.tolist(), color_func["BlueGreenYellow"])
    c.render(True)
shaded:", "= visible_check(elem_bound, part) visible_parts.append((elem_bound,)) # list flatten operation by itertools.chain flatten both list", "self.elems.append(Rect(pos, color, \" \")) self.current_line += len(table)*3 + 4 def add_hist(self, hist, color_func,", "inc, self.back * inc) else: raise TypeError(\"operand type must be tuple\") def __str__(self):", "single line \"\"\" # Find all elements to be rendered in current line", "in enumerate(elems_inline): elem_bound = (elem.pos.col, elem.pos.col + len(elem.text), elem_i) for i, part in", "CharColor(self.fore * inc, self.back * inc) else: raise TypeError(\"operand type must be tuple\")", "r, g, b): self.r = int(r) self.g = int(g) self.b = int(b) def", "cell_size < len(new_cell): cell_size = len(new_cell) cell_size += 2 if anchor is None:", "Canvas() grid = np.random.random_sample(((7, 10))) hist = np.random.random_sample(((15, 1))) c.add_text(\"This is a heatmap", "thermo_left) + anchor back = color_func(1.0 - 1.0 * line / (len(table) *", "(elem.pos.col, elem.pos.col + len(elem.text), elem_i) for i, part in enumerate(visible_parts): visible_parts[i] = visible_check(elem_bound,", "if A_right_shaded: return ((B_left, A_left, B_id),) for elem_i, elem in enumerate(elems_inline): elem_bound =", "# -*- coding: utf-8 -*- import os import sys import itertools import numpy", "+= 2 def add_empty_line(self, pos): self.elems.append(Rect(Pos(pos.row, 0), CharColor((0,0,0)), \" \"*self.cols)) def add_frame(self, size,", "for elem_i, elem in enumerate(elems_inline): elem_bound = (elem.pos.col, elem.pos.col + len(elem.text), elem_i) for", "ith*bar_width) + hist_anchor if height * (1 - val/max_val) < line: color =", "for c in table]) max_cell = max([max(c) for c in table]) # Get", "(1 - val/max_val) < line: color = color_func(val/max_val) self.elems.append(Rect(pos, CharColor(color, color*2), \" \"))", "= u\"├\" if \"left\" in sides: self.elems.append(Rect(Pos(l, 0)+anchor, color, tick_char)) if \"right\" in", "color*2), \" \")) self.current_line += 30 def render_line(self, line_num, is_reset=False): \"\"\" render elements", "(self.cols - len(table[0]) * cell_size - 7) / 3) self.add_frame(Pos(len(table)*3+3, len(table[0])*cell_size+5), anchor, x_rep=3,", "raise TypeError(\"operand type must be either 3-tuple or 2-tuple\") elif type(inc) is float:", "if cell_size < len(new_cell): cell_size = len(new_cell) cell_size += 2 if anchor is", "u\"│\" if x_off is not None and x_rep is not None: if (l", "2) self.elems.append(Rect(pos, color, \" \" + cell + \" \")) # Add each", "== tuple and len(inc) == 3: return Color(self.r * inc[0], self.g * inc[1],", "for ith, val in enumerate(hist[0]): pos = Pos(line, ith*bar_width) + hist_anchor if height", "= elem.text[part[0] - elem.pos.col : part[1] - elem.pos.col] strokes += self.stroke(text, color) strokes", "sys.stdout.flush() sys.stdout.write(\"\\n\") for line in range(self.rows): self.render_line(line, is_reset) def stroke(self, text, c): COLOR_FORE", "\" \")) self.current_line += 30 def render_line(self, line_num, is_reset=False): \"\"\" render elements in", "self.elems.append(Rect(anchor+Pos(size.row, size.col), color, u\"┘\")) self.elems.append(Rect(anchor+Pos(0, size.col), color, u\"┐\")) def add_grid(self, table, color_func, anchor=None):", "TypeError: new_cell = cell if cell_size < len(new_cell): cell_size = len(new_cell) cell_size +=", "in visible_parts: elem = elems_inline[part[2]] color = elem.color text = elem.text[part[0] - elem.pos.col", "x_rep=3, x_off=0, y_rep=bar_width, y_off=0) hist_anchor = anchor + 
Pos(2, 3) for line in", "for l in range(1,size.col): tick_char = u\"─\" if y_off is not None and", "len(table)*3 + 4 def add_hist(self, hist, color_func, anchor=None): max_val = max(hist[0]) height =", "back = color_func((cell-min_cell)/(max_cell-min_cell)) color = CharColor(back, back) * (1., 0.5) cell = \"\"", "[row_num, row] in enumerate(table): for [col_num, cell] in enumerate(row): add_cell(cell, cell_anchor, Pos(row_num*3+0, col_num),", "= \"\" if isBlank else \"%1.2f\" % cell if type(cell) is float else", "back) * (1., 0.5) cell = \"\" if isBlank else \"%1.2f\" % cell", "% (self.r, self.g, self.b) class CharColor: def __init__(self, fore, back=None): if type(fore) ==", "\" + str(self.back) class Rect: \"\"\" Rect: Draw a rectangle area with given", "Color(self.r * inc[0], self.g * inc[1], self.b * inc[2]) elif type(inc) == float:", "+ inc[1]) elif len(inc) == 3: return CharColor(self.fore + inc, self.back + inc)", "* inc[1], self.b * inc[2]) elif type(inc) == float: return Color(int(self.r * inc),", "length cell_anchor = anchor + Pos(2, 1) for [row_num, row] in enumerate(table): for", "to be rendered in current line elems_inline = [elem for elem in self.elems", "be rendered in current line elems_inline = [elem for elem in self.elems if", "\")) self.current_line += 30 def render_line(self, line_num, is_reset=False): \"\"\" render elements in single", "if is_reset else \"\" sys.stdout.write(strokes + COLOR_RESET) sys.stdout.write(\"\\n\") def render(self, is_reset=False): sys.stdout.flush() sys.stdout.write(\"\\n\")", "is_reset=False): \"\"\" render elements in single line \"\"\" # Find all elements to", "sides: self.elems.append(Rect(Pos(l, 0)+anchor, color, tick_char)) if \"right\" in sides: self.elems.append(Rect(Pos(l, size.col)+anchor, color, u\"│\"))", "1.) 
self.add_empty_line(anchor) self.elems.append(Rect(anchor, color, text)) self.current_line += 2 def add_empty_line(self, pos): self.elems.append(Rect(Pos(pos.row, 0),", "inc, self.g + inc, self.b + inc) else: raise TypeError(\"operand type must be", "must be either 3-tuple or Color\") def __mul__(self, inc): if type(inc) == tuple", "return ((B_left, A_left, B_id),) for elem_i, elem in enumerate(elems_inline): elem_bound = (elem.pos.col, elem.pos.col", "TypeError(\"operand type must be either 3-tuple or 2-tuple\") elif type(inc) is int: return", "self.g + inc, self.b + inc) else: raise TypeError(\"operand type must be either", "if A_left_shaded: return ((A_right, B_right, B_id),) if A_right_shaded: return ((B_left, A_left, B_id),) for", "b=c.back.b) return fore+back+text if __name__ == \"__main__\": c = Canvas() grid = np.random.random_sample(((7,", "add_grid(self, table, color_func, anchor=None): cell_size = 0 min_cell = min([min(c) for c in", "if len(inc) == 2: return CharColor(self.fore + inc[0], self.back + inc[1]) elif len(inc)", "* pos_time.row) def __str__(self): return \"{%d, %d}\" % (self.row, self.col) class Color: def", "adding elements current_line = 0 def add_text(self, text, color, anchor=None): if anchor is", "self.col * pos_time[1]) return Pos(self.row * pos_time.row, self.col * pos_time.row) def __str__(self): return", "inc) else: raise TypeError(\"operand type must be either 3-tuple or Color\") def __mul__(self,", "sides: self.elems.append(Rect(Pos(l, size.col)+anchor, color, u\"│\")) for l in range(1,size.col): tick_char = u\"─\" if", "__init__(self, fore, back=None): if type(fore) == tuple and len(fore) == 3: self.fore =", "== inc and len(inc) == 3: return Color(self.r + inc[0], self.g + inc[1],", "def __mul__(self, inc): if type(inc) == tuple and len(inc) == 3: return Color(self.r", "2) back = color_func((cell-min_cell)/(max_cell-min_cell)) color = CharColor(back, back) * (1., 0.5) cell =", "* cell_size - 7) / 3) self.add_frame(Pos(len(table)*3+3, len(table[0])*cell_size+5), anchor, x_rep=3, x_off=0, y_rep=cell_size, y_off=0)", "for successively adding elements current_line = 0 def add_text(self, text, color, anchor=None): if", "enumerate(row): add_cell(cell, cell_anchor, Pos(row_num*3+0, col_num), True) add_cell(cell, cell_anchor, Pos(row_num*3+1, col_num)) add_cell(cell, cell_anchor, Pos(row_num*3+2,", "def render_line(self, line_num, is_reset=False): \"\"\" render elements in single line \"\"\" # Find", "back == None : self.back = Color(0,0,0) elif type(back) == tuple and len(back)", "= text class Canvas: rows, cols = get_term_size() # graphic elements hold by", "% (self.row, self.col) class Color: def __init__(self, r, g, b): self.r = int(r)", "back element into two visible # parts. if A_left_dodged or A_right_dodged: # dodged", "sys.stdout.write(\"\\n\") def render(self, is_reset=False): sys.stdout.flush() sys.stdout.write(\"\\n\") for line in range(self.rows): self.render_line(line, is_reset) def", "(and all iterables), thus we have to coat it with one more #", "3-tuple or 2-tuple\") elif type(inc) is int: return CharColor(self.fore + inc, self.back +", "inc.r, self.g + inc.g, self.b + inc.b) elif type(inc) == int: return Color(self.r", "the fore and back element doesn't overlap # 2. 
shaded: fore element shaded", "and len(fore) == 3: self.fore = Color(*fore) else: self.fore = fore if back", "raise TypeError(\"operand type must be either 3-tuple or 2-tuple\") elif type(inc) is int:", "pos * (1, cell_size) + anchor + Pos(0, 2) back = color_func((cell-min_cell)/(max_cell-min_cell)) color", "pos_time): if type(pos_time) is tuple: return Pos(self.row * pos_time[0], self.col * pos_time[1]) return", "raise TypeError(\"operand type must be tuple\") def __str__(self): return str(self.fore) + \" \"", "raise TypeError(\"operand type must be either 3-tuple or Color\") def __mul__(self, inc): if", "inc, self.back + inc) else: raise TypeError(\"operand type must be either 3-tuple or", "6): pos = Pos(line, thermo_left) + anchor back = color_func(1.0 - 1.0 *", "self.elems.append(Rect(anchor, color, text)) self.current_line += 2 def add_empty_line(self, pos): self.elems.append(Rect(Pos(pos.row, 0), CharColor((0,0,0)), \"", "u\"─\" if y_off is not None and y_rep is not None: if (l", "= A_left <= B_left A_right_shaded = A_right >= B_right A_left_dodged = A_right <", "3. split: fore element splits back element into two visible # parts. if", "tuple and len(fore) == 3: self.fore = Color(*fore) else: self.fore = fore if", "* pos_time.row, self.col * pos_time.row) def __str__(self): return \"{%d, %d}\" % (self.row, self.col)", "size.col), color, u\"┘\")) self.elems.append(Rect(anchor+Pos(0, size.col), color, u\"┐\")) def add_grid(self, table, color_func, anchor=None): cell_size", "return ((B_left, B_right, B_id),) elif not (A_left_shaded or A_right_shaded): # splitted return ((B_left,", "height = 30 bar_width = 5 if anchor is None: anchor = Pos(self.current_line,", "inc[1], self.b * inc[2]) elif type(inc) == float: return Color(int(self.r * inc), int(self.g", "compare the left/right bound of new element with each # existing bound. 
A_left_shaded", "add_cell(cell, cell_anchor, Pos(row_num*3+0, col_num), True) add_cell(cell, cell_anchor, Pos(row_num*3+1, col_num)) add_cell(cell, cell_anchor, Pos(row_num*3+2, col_num),", "and x_rep is not None: if (l + x_off) % x_rep == 0:", "Pos(line, ith*bar_width) + hist_anchor if height * (1 - val/max_val) < line: color", "- 7) / 3) self.add_frame(Pos(len(table)*3+3, len(table[0])*cell_size+5), anchor, x_rep=3, x_off=0, y_rep=cell_size, y_off=0) def add_cell(cell,", "either 3-tuple or Color\") def __mul__(self, inc): if type(inc) == tuple and len(inc)", "# partially shaded if A_left_shaded: return ((A_right, B_right, B_id),) if A_right_shaded: return ((B_left,", "type must be either 3-tuple or int\") def __str__(self): return \"{%d, %d, %d}\"", "by itertools.chain flatten both list and # tuple (and all iterables), thus we", "-*- import os import sys import itertools import numpy as np from color_schemes", "\" * visible_parts[0][0] COLOR_RESET = '\\x01\\x1b[0m\\x02' for part in visible_parts: elem = elems_inline[part[2]]", "%d, %d}\" % (self.r, self.g, self.b) class CharColor: def __init__(self, fore, back=None): if", "either 3-tuple or int\") def __str__(self): return \"{%d, %d, %d}\" % (self.r, self.g,", "self.text = text class Canvas: rows, cols = get_term_size() # graphic elements hold", "= anchor + Pos(2, 3) for line in range(height): for ith, val in", "if no elements in this line strokes = \"\" if visible_parts == []", "== [] else \" \" * visible_parts[0][0] COLOR_RESET = '\\x01\\x1b[0m\\x02' for part in", "fully shaded return [] else: # partially shaded if A_left_shaded: return ((A_right, B_right,", "= col def __add__(self, pos): return Pos(self.row + pos.row, self.col + pos.col) def", "a thermometer on the right side thermo_left = len(table[0]) * cell_size + 10", "color_func def get_term_size(): rows, columns = os.popen('stty size', 'r').read().split() return int(rows), int(columns) class", "be either 3-tuple or 2-tuple\") elif type(inc) is float: return CharColor(self.fore * inc,", "/ 3) self.add_frame(Pos(len(table)*3+3, len(table[0])*cell_size+5), anchor, x_rep=3, x_off=0, y_rep=cell_size, y_off=0) def add_cell(cell, anchor, pos,", "tuple: return Pos(self.row * pos_time[0], self.col * pos_time[1]) return Pos(self.row * pos_time.row, self.col", "* (1, cell_size) + anchor + Pos(0, 2) back = color_func((cell-min_cell)/(max_cell-min_cell)) color =", "Draw a rectangle area with given fore/back color and text content records position,", "\"\"\" def __init__(self, pos, color, text): self.pos = pos self.color = color self.text", "5), anchor, x_rep=3, x_off=0, y_rep=bar_width, y_off=0) hist_anchor = anchor + Pos(2, 3) for", "line elems_inline = [elem for elem in self.elems if elem.pos.row == line_num] visible_parts", "if elem.pos.row == line_num] visible_parts = [] def visible_check((A_left, A_right, _), (B_left, B_right,", "A_right_shaded = A_right >= B_right A_left_dodged = A_right < B_left A_right_dodged = A_left", "= color_func(1.0 - 1.0 * line / (len(table) * 3+3)) color = CharColor(Color(0,", "True) # Add a thermometer on the right side thermo_left = len(table[0]) *", "have to coat it with one more # tuple in order to maintain", "elif type(inc) == float: return Color(int(self.r * inc), int(self.g * inc), int(self.b *", "of new element with each # existing bound. 
A_left_shaded = A_left <= B_left", "self.col * pos_time.row) def __str__(self): return \"{%d, %d}\" % (self.row, self.col) class Color:", "def render(self, is_reset=False): sys.stdout.flush() sys.stdout.write(\"\\n\") for line in range(self.rows): self.render_line(line, is_reset) def stroke(self,", "col_num), True) # Add a thermometer on the right side thermo_left = len(table[0])", "elem.pos.row == line_num] visible_parts = [] def visible_check((A_left, A_right, _), (B_left, B_right, B_id)):", "must be either 3-tuple or 2-tuple\") elif type(inc) is float: return CharColor(self.fore *", "list flatten operation by itertools.chain flatten both list and # tuple (and all", "+ str(self.back) class Rect: \"\"\" Rect: Draw a rectangle area with given fore/back", "u\"│\")) for l in range(1,size.col): tick_char = u\"─\" if y_off is not None", "+ inc, self.b + inc) else: raise TypeError(\"operand type must be either 3-tuple", "one more # tuple in order to maintain the form. visible_parts = list(itertools.chain.from_iterable(visible_parts))", "and y_rep is not None: if (l + y_off) % y_rep == 0:", "# graphic elements hold by Canvas. elems = [] # for successively adding", "elem.text[part[0] - elem.pos.col : part[1] - elem.pos.col] strokes += self.stroke(text, color) strokes +=", "type(inc) is float: return CharColor(self.fore * inc, self.back * inc) else: raise TypeError(\"operand", "in this line strokes = \"\" if visible_parts == [] else \" \"", "Color(self.r + inc.r, self.g + inc.g, self.b + inc.b) elif type(inc) == int:", "back def __add__(self, inc): if type(inc) == tuple: if len(inc) == 2: return", "color_schemes import color_func def get_term_size(): rows, columns = os.popen('stty size', 'r').read().split() return int(rows),", "else \" \" * visible_parts[0][0] COLOR_RESET = '\\x01\\x1b[0m\\x02' for part in visible_parts: elem", "color) strokes += COLOR_RESET if is_reset else \"\" sys.stdout.write(strokes + COLOR_RESET) sys.stdout.write(\"\\n\") def", "cell cell = cell.rjust(cell_size - 2) self.elems.append(Rect(pos, color, \" \" + cell +", "left or right bound of back # element. # 3. split: fore element", "if anchor is None: anchor = Pos(self.current_line, (self.cols - len(hist[0]) * bar_width) /", "or 2-tuple\") elif type(inc) is float: return CharColor(self.fore * inc, self.back * inc)", "x_off=0, y_rep=cell_size, y_off=0) def add_cell(cell, anchor, pos, isBlank=False): pos = pos * (1,", "+ anchor) tick_char = u\"│\" if x_off is not None and x_rep is", "= color_func((cell-min_cell)/(max_cell-min_cell)) color = CharColor(back, back) * (1., 0.5) cell = \"\" if", "int\") def __str__(self): return \"{%d, %d, %d}\" % (self.r, self.g, self.b) class CharColor:", "self.col = col def __add__(self, pos): return Pos(self.row + pos.row, self.col + pos.col)", "% cell except TypeError: new_cell = cell if cell_size < len(new_cell): cell_size =", "if \"top\" in sides: self.elems.append(Rect(Pos(0, l)+anchor, color, u\"─\")) if \"bottom\" in sides: self.elems.append(Rect(Pos(size.row,", "Rect: \"\"\" Rect: Draw a rectangle area with given fore/back color and text", "2 def add_empty_line(self, pos): self.elems.append(Rect(Pos(pos.row, 0), CharColor((0,0,0)), \" \"*self.cols)) def add_frame(self, size, anchor,", "table # and calculates the max cell length cell_anchor = anchor + Pos(2,", "type(inc) == float: return Color(int(self.r * inc), int(self.g * inc), int(self.b * inc))", "with one more # tuple in order to maintain the form. 
visible_parts =", "import itertools import numpy as np from color_schemes import color_func def get_term_size(): rows,", "if isBlank else \"%1.2f\" % cell if type(cell) is float else cell cell", "= int(g) self.b = int(b) def __add__(self, inc): if type(inc) == inc and", "inc.b) elif type(inc) == int: return Color(self.r + inc, self.g + inc, self.b", "visible_parts[0][0] COLOR_RESET = '\\x01\\x1b[0m\\x02' for part in visible_parts: elem = elems_inline[part[2]] color =", "elem_i) for i, part in enumerate(visible_parts): visible_parts[i] = visible_check(elem_bound, part) visible_parts.append((elem_bound,)) # list", "if type(inc) == tuple and len(inc) == 3: return Color(self.r * inc[0], self.g", "3-tuple or Color\") def __mul__(self, inc): if type(inc) == tuple and len(inc) ==", "is float else cell cell = cell.rjust(cell_size - 2) self.elems.append(Rect(pos, color, \" \"", "A_left_dodged = A_right < B_left A_right_dodged = A_left > B_right # Four cases", "line \"\"\" # Find all elements to be rendered in current line elems_inline", "class CharColor: def __init__(self, fore, back=None): if type(fore) == tuple and len(fore) ==", "color_func, anchor=None): max_val = max(hist[0]) height = 30 bar_width = 5 if anchor", "0 min_cell = min([min(c) for c in table]) max_cell = max([max(c) for c", "= 0 min_cell = min([min(c) for c in table]) max_cell = max([max(c) for", "None and y_rep is not None: if (l + y_off) % y_rep ==", "A_right, _), (B_left, B_right, B_id)): # compare the left/right bound of new element", "% y_rep == 0: tick_char = u\"┴\" if \"top\" in sides: self.elems.append(Rect(Pos(0, l)+anchor,", "l in range(1,size.col): tick_char = u\"─\" if y_off is not None and y_rep", "return [] else: # partially shaded if A_left_shaded: return ((A_right, B_right, B_id),) if", "* inc[0], self.back * inc[1]) elif len(inc) == 3: return CharColor(self.fore * inc,", "utf-8 -*- import os import sys import itertools import numpy as np from", "to maintain the form. visible_parts = list(itertools.chain.from_iterable(visible_parts)) visible_parts = sorted(visible_parts, key=lambda x:x[0]) #", "render elements in single line \"\"\" # Find all elements to be rendered", "return fore+back+text if __name__ == \"__main__\": c = Canvas() grid = np.random.random_sample(((7, 10)))", "u\"─\")) if \"bottom\" in sides: self.elems.append(Rect(Pos(size.row, l)+anchor, color, tick_char)) self.elems.append(Rect(anchor, color, u\"┌\")) self.elems.append(Rect(anchor+Pos(size.row,", "is not None and x_rep is not None: if (l + x_off) %", "- elem.pos.col : part[1] - elem.pos.col] strokes += self.stroke(text, color) strokes += COLOR_RESET", "+ inc[1], self.b + inc[2]) elif type(inc) == Color: return Color(self.r + inc.r,", "in range(height): for ith, val in enumerate(hist[0]): pos = Pos(line, ith*bar_width) + hist_anchor", "== 0: tick_char = u\"┴\" if \"top\" in sides: self.elems.append(Rect(Pos(0, l)+anchor, color, u\"─\"))", "class Canvas: rows, cols = get_term_size() # graphic elements hold by Canvas. elems", "isBlank=False): pos = pos * (1, cell_size) + anchor + Pos(0, 2) back", "== int: return Color(self.r + inc, self.g + inc, self.b + inc) else:", "with given fore/back color and text content records position, size, color and content", "enumerate(elems_inline): elem_bound = (elem.pos.col, elem.pos.col + len(elem.text), elem_i) for i, part in enumerate(visible_parts):", "shaded: fore element shaded at left or right bound of back # element.", "(self.cols - len(text)) / 2) color = color * (0.5, 1.) 
self.add_empty_line(anchor) self.elems.append(Rect(anchor,", "is a heatmap example\", CharColor(color_func[\"Plum\"](0.9))) c.add_grid(grid.tolist(), color_func[\"Plum\"]) c.add_text(\"This is a histogram example\", CharColor(color_func[\"BlueGreenYellow\"](0.9)))", "Color(0,0,0) elif type(back) == tuple and len(back) == 3: self.back = Color(*back) else:", "cell except TypeError: new_cell = cell if cell_size < len(new_cell): cell_size = len(new_cell)", "3) for line in range(height): for ith, val in enumerate(hist[0]): pos = Pos(line,", "rows, columns = os.popen('stty size', 'r').read().split() return int(rows), int(columns) class Pos: def __init__(self,", "order to maintain the form. visible_parts = list(itertools.chain.from_iterable(visible_parts)) visible_parts = sorted(visible_parts, key=lambda x:x[0])", "in range(1,size.col): tick_char = u\"─\" if y_off is not None and y_rep is", "# Add each cell into element table # and calculates the max cell", "sides: self.elems.append(Rect(Pos(size.row, l)+anchor, color, tick_char)) self.elems.append(Rect(anchor, color, u\"┌\")) self.elems.append(Rect(anchor+Pos(size.row, 0), color, u\"└\")) self.elems.append(Rect(anchor+Pos(size.row,", "3: return CharColor(self.fore * inc, self.back * inc) else: raise TypeError(\"operand type must", "3-tuple or 2-tuple\") elif type(inc) is float: return CharColor(self.fore * inc, self.back *", "add_cell(cell, cell_anchor, Pos(row_num*3+1, col_num)) add_cell(cell, cell_anchor, Pos(row_num*3+2, col_num), True) # Add a thermometer", "fore/back color and text content records position, size, color and content only. \"\"\"", "sorted(visible_parts, key=lambda x:x[0]) # handles if no elements in this line strokes =", "inc): if type(inc) == tuple and len(inc) == 3: return Color(self.r * inc[0],", "30 bar_width = 5 if anchor is None: anchor = Pos(self.current_line, (self.cols -", "into two visible # parts. if A_left_dodged or A_right_dodged: # dodged return ((B_left,", "min([min(c) for c in table]) max_cell = max([max(c) for c in table]) #", "\"right\", \"top\", \"bottom\"), x_tick_range=None, y_tick_range=None, x_rep=None, y_rep=None, x_off=None, y_off=None): color = CharColor((255, 255,", "Pos(line, thermo_left) + anchor back = color_func(1.0 - 1.0 * line / (len(table)", "= Pos(line, ith*bar_width) + hist_anchor if height * (1 - val/max_val) < line:", "= Pos(self.current_line, (self.cols - len(text)) / 2) color = color * (0.5, 1.)", "either 3-tuple or 2-tuple\") elif type(inc) is float: return CharColor(self.fore * inc, self.back", "str(self.fore) + \" \" + str(self.back) class Rect: \"\"\" Rect: Draw a rectangle", "def add_hist(self, hist, color_func, anchor=None): max_val = max(hist[0]) height = 30 bar_width =", "if back == None : self.back = Color(0,0,0) elif type(back) == tuple and", "splits back element into two visible # parts. 
if A_left_dodged or A_right_dodged: #", "return Color(self.r + inc[0], self.g + inc[1], self.b + inc[2]) elif type(inc) ==", "self.col + pos.col) def __mul__(self, pos_time): if type(pos_time) is tuple: return Pos(self.row *", "if \"left\" in sides: self.elems.append(Rect(Pos(l, 0)+anchor, color, tick_char)) if \"right\" in sides: self.elems.append(Rect(Pos(l,", "Get reformed string and calculate max length for row in table: for cell", "max([max(c) for c in table]) # Get reformed string and calculate max length", "2-tuple\") elif type(inc) is float: return CharColor(self.fore * inc, self.back * inc) else:", "3: return CharColor(self.fore + inc, self.back + inc) else: raise TypeError(\"operand type must", "type must be tuple\") def __mul__(self, inc): if type(inc) == tuple: if len(inc)", "Color(self.r + inc[0], self.g + inc[1], self.b + inc[2]) elif type(inc) == Color:", "Pos(row_num*3+1, col_num)) add_cell(cell, cell_anchor, Pos(row_num*3+2, col_num), True) # Add a thermometer on the", "+ inc.g, self.b + inc.b) elif type(inc) == int: return Color(self.r + inc,", "def __init__(self, pos, color, text): self.pos = pos self.color = color self.text =", "len(back) == 3: self.back = Color(*back) else: self.back = back def __add__(self, inc):", "float: return CharColor(self.fore * inc, self.back * inc) else: raise TypeError(\"operand type must", "x_rep == 0: tick_char = u\"├\" if \"left\" in sides: self.elems.append(Rect(Pos(l, 0)+anchor, color,", "= cell.rjust(cell_size - 2) self.elems.append(Rect(pos, color, \" \" + cell + \" \"))", "+ 10 for line in range(1, len(table) * 3 + 6): pos =", "__add__(self, pos): return Pos(self.row + pos.row, self.col + pos.col) def __mul__(self, pos_time): if", "3: self.back = Color(*back) else: self.back = back def __add__(self, inc): if type(inc)", "* inc, self.back * inc) else: raise TypeError(\"operand type must be either 3-tuple", "/ 2) color = color * (0.5, 1.) 
self.add_empty_line(anchor) self.elems.append(Rect(anchor, color, text)) self.current_line", "(1, cell_size) + anchor + Pos(0, 2) back = color_func((cell-min_cell)/(max_cell-min_cell)) color = CharColor(back,", "tick_char)) if \"right\" in sides: self.elems.append(Rect(Pos(l, size.col)+anchor, color, u\"│\")) for l in range(1,size.col):", "+ inc, self.back + inc) else: raise TypeError(\"operand type must be tuple\") def", "/ 2) self.add_frame(Pos(height + 3, len(hist[0])*bar_width + 5), anchor, x_rep=3, x_off=0, y_rep=bar_width, y_off=0)", "2 if anchor is None: anchor = Pos(self.current_line, (self.cols - len(table[0]) * cell_size", "return Color(self.r + inc.r, self.g + inc.g, self.b + inc.b) elif type(inc) ==", "= Pos(line, thermo_left) + anchor back = color_func(1.0 - 1.0 * line /", "anchor=None): cell_size = 0 min_cell = min([min(c) for c in table]) max_cell =", "Pos(row_num*3+2, col_num), True) # Add a thermometer on the right side thermo_left =", "and len(inc) == 3: return Color(self.r * inc[0], self.g * inc[1], self.b *", "not None: if (l + x_off) % x_rep == 0: tick_char = u\"├\"", "color_func, anchor=None): cell_size = 0 min_cell = min([min(c) for c in table]) max_cell", "2) self.add_frame(Pos(height + 3, len(hist[0])*bar_width + 5), anchor, x_rep=3, x_off=0, y_rep=bar_width, y_off=0) hist_anchor", "\")) self.current_line += len(table)*3 + 4 def add_hist(self, hist, color_func, anchor=None): max_val =", "(A_left_shaded and A_right_shaded): # fully shaded return [] else: # partially shaded if", "cell_size) + anchor + Pos(0, 2) back = color_func((cell-min_cell)/(max_cell-min_cell)) color = CharColor(back, back)", "and content only. \"\"\" def __init__(self, pos, color, text): self.pos = pos self.color", "text): self.pos = pos self.color = color self.text = text class Canvas: rows,", "is tuple: return Pos(self.row * pos_time[0], self.col * pos_time[1]) return Pos(self.row * pos_time.row,", "* inc[0], self.g * inc[1], self.b * inc[2]) elif type(inc) == float: return", "if anchor is None: anchor = Pos(self.current_line, (self.cols - len(text)) / 2) color", "type(pos_time) is tuple: return Pos(self.row * pos_time[0], self.col * pos_time[1]) return Pos(self.row *", "fore element splits back element into two visible # parts. if A_left_dodged or", "render_line(self, line_num, is_reset=False): \"\"\" render elements in single line \"\"\" # Find all", "and len(inc) == 3: return Color(self.r + inc[0], self.g + inc[1], self.b +", "color, \" \" + cell + \" \")) # Add each cell into", "doesn't overlap # 2. shaded: fore element shaded at left or right bound", "TypeError(\"operand type must be tuple\") def __mul__(self, inc): if type(inc) == tuple: if", "0 def add_text(self, text, color, anchor=None): if anchor is None: anchor = Pos(self.current_line,", "content only. \"\"\" def __init__(self, pos, color, text): self.pos = pos self.color =", "cell_size - 7) / 3) self.add_frame(Pos(len(table)*3+3, len(table[0])*cell_size+5), anchor, x_rep=3, x_off=0, y_rep=cell_size, y_off=0) def", "elif type(back) == tuple and len(back) == 3: self.back = Color(*back) else: self.back", "for line in range(height): for ith, val in enumerate(hist[0]): pos = Pos(line, ith*bar_width)", "range(1,size.col): tick_char = u\"─\" if y_off is not None and y_rep is not", "bound of back # element. # 3. 
split: fore element splits back element", "visible_parts.append((elem_bound,)) # list flatten operation by itertools.chain flatten both list and # tuple", "i, part in enumerate(visible_parts): visible_parts[i] = visible_check(elem_bound, part) visible_parts.append((elem_bound,)) # list flatten operation", "for [row_num, row] in enumerate(table): for [col_num, cell] in enumerate(row): add_cell(cell, cell_anchor, Pos(row_num*3+0,", "# and calculates the max cell length cell_anchor = anchor + Pos(2, 1)", "right side thermo_left = len(table[0]) * cell_size + 10 for line in range(1,", "line / (len(table) * 3+3)) color = CharColor(Color(0, 0, 0), back) self.elems.append(Rect(pos, color,", "add_cell(cell, anchor, pos, isBlank=False): pos = pos * (1, cell_size) + anchor +", "# parts. if A_left_dodged or A_right_dodged: # dodged return ((B_left, B_right, B_id),) elif", "g=c.back.g, b=c.back.b) return fore+back+text if __name__ == \"__main__\": c = Canvas() grid =", "= os.popen('stty size', 'r').read().split() return int(rows), int(columns) class Pos: def __init__(self, row, col):", "type(inc) == Color: return Color(self.r + inc.r, self.g + inc.g, self.b + inc.b)", "list and # tuple (and all iterables), thus we have to coat it", "= min([min(c) for c in table]) max_cell = max([max(c) for c in table])", "pos, color, text): self.pos = pos self.color = color self.text = text class", "color, tick_char)) if \"right\" in sides: self.elems.append(Rect(Pos(l, size.col)+anchor, color, u\"│\")) for l in", "in enumerate(hist[0]): pos = Pos(line, ith*bar_width) + hist_anchor if height * (1 -", "\"%1.2f\" % cell if type(cell) is float else cell cell = cell.rjust(cell_size -", "+ \" \")) # Add each cell into element table # and calculates", "self.elems.append(Rect(Pos(size.row, l)+anchor, color, tick_char)) self.elems.append(Rect(anchor, color, u\"┌\")) self.elems.append(Rect(anchor+Pos(size.row, 0), color, u\"└\")) self.elems.append(Rect(anchor+Pos(size.row, size.col),", "else: self.back = back def __add__(self, inc): if type(inc) == tuple: if len(inc)", "visible_check((A_left, A_right, _), (B_left, B_right, B_id)): # compare the left/right bound of new", "== 3: self.fore = Color(*fore) else: self.fore = fore if back == None", "and A_right_shaded): # fully shaded return [] else: # partially shaded if A_left_shaded:", "enumerate(visible_parts): visible_parts[i] = visible_check(elem_bound, part) visible_parts.append((elem_bound,)) # list flatten operation by itertools.chain flatten", "in enumerate(visible_parts): visible_parts[i] = visible_check(elem_bound, part) visible_parts.append((elem_bound,)) # list flatten operation by itertools.chain", "0: tick_char = u\"├\" if \"left\" in sides: self.elems.append(Rect(Pos(l, 0)+anchor, color, tick_char)) if", "0), color, u\"└\")) self.elems.append(Rect(anchor+Pos(size.row, size.col), color, u\"┘\")) self.elems.append(Rect(anchor+Pos(0, size.col), color, u\"┐\")) def add_grid(self,", "self.b + inc) else: raise TypeError(\"operand type must be either 3-tuple or Color\")", "Pos(self.row * pos_time.row, self.col * pos_time.row) def __str__(self): return \"{%d, %d}\" % (self.row,", "= row self.col = col def __add__(self, pos): return Pos(self.row + pos.row, self.col", "5 if anchor is None: anchor = Pos(self.current_line, (self.cols - len(hist[0]) * bar_width)", "try: new_cell = \"%1.2f\" % cell except TypeError: new_cell = cell if cell_size", "text = elem.text[part[0] - elem.pos.col : part[1] - elem.pos.col] strokes += self.stroke(text, color)", "+ Pos(2, 1) for [row_num, row] 
in enumerate(table): for [col_num, cell] in enumerate(row):", "size.col), color, u\"┐\")) def add_grid(self, table, color_func, anchor=None): cell_size = 0 min_cell =", "* inc[2]) elif type(inc) == float: return Color(int(self.r * inc), int(self.g * inc),", "= anchor + Pos(2, 1) for [row_num, row] in enumerate(table): for [col_num, cell]", "to coat it with one more # tuple in order to maintain the", "flatten both list and # tuple (and all iterables), thus we have to", "= pos self.color = color self.text = text class Canvas: rows, cols =", "by Canvas. elems = [] # for successively adding elements current_line = 0", "+ 4 def add_hist(self, hist, color_func, anchor=None): max_val = max(hist[0]) height = 30", "return ((A_right, B_right, B_id),) if A_right_shaded: return ((B_left, A_left, B_id),) for elem_i, elem", "= int(r) self.g = int(g) self.b = int(b) def __add__(self, inc): if type(inc)", "CharColor(back, back) * (1., 0.5) cell = \"\" if isBlank else \"%1.2f\" %", "* inc, self.back * inc) else: raise TypeError(\"operand type must be tuple\") def", "and text content records position, size, color and content only. \"\"\" def __init__(self,", "else cell cell = cell.rjust(cell_size - 2) self.elems.append(Rect(pos, color, \" \" + cell", "%d}\" % (self.row, self.col) class Color: def __init__(self, r, g, b): self.r =", "\"\"\" Rect: Draw a rectangle area with given fore/back color and text content", "or int\") def __str__(self): return \"{%d, %d, %d}\" % (self.r, self.g, self.b) class", "part) visible_parts.append((elem_bound,)) # list flatten operation by itertools.chain flatten both list and #", "if height * (1 - val/max_val) < line: color = color_func(val/max_val) self.elems.append(Rect(pos, CharColor(color,", "rows, cols = get_term_size() # graphic elements hold by Canvas. 
elems = []", "u\"┌\")) self.elems.append(Rect(anchor+Pos(size.row, 0), color, u\"└\")) self.elems.append(Rect(anchor+Pos(size.row, size.col), color, u\"┘\")) self.elems.append(Rect(anchor+Pos(0, size.col), color, u\"┐\"))", "= (elem.pos.col, elem.pos.col + len(elem.text), elem_i) for i, part in enumerate(visible_parts): visible_parts[i] =", "max(hist[0]) height = 30 bar_width = 5 if anchor is None: anchor =", "hist = np.random.random_sample(((15, 1))) c.add_text(\"This is a heatmap example\", CharColor(color_func[\"Plum\"](0.9))) c.add_grid(grid.tolist(), color_func[\"Plum\"]) c.add_text(\"This", "sys.stdout.write(\"\\n\") for line in range(self.rows): self.render_line(line, is_reset) def stroke(self, text, c): COLOR_FORE =", "# tuple (and all iterables), thus we have to coat it with one", "3 + 6): pos = Pos(line, thermo_left) + anchor back = color_func(1.0 -", "= Pos(self.current_line, (self.cols - len(table[0]) * cell_size - 7) / 3) self.add_frame(Pos(len(table)*3+3, len(table[0])*cell_size+5),", "shaded return [] else: # partially shaded if A_left_shaded: return ((A_right, B_right, B_id),)", "self.back * inc) else: raise TypeError(\"operand type must be tuple\") def __str__(self): return", "inc[1], self.b + inc[2]) elif type(inc) == Color: return Color(self.r + inc.r, self.g", "enumerate(hist[0]): pos = Pos(line, ith*bar_width) + hist_anchor if height * (1 - val/max_val)", "\"*self.cols)) def add_frame(self, size, anchor, sides=(\"left\", \"right\", \"top\", \"bottom\"), x_tick_range=None, y_tick_range=None, x_rep=None, y_rep=None,", "self.b) class CharColor: def __init__(self, fore, back=None): if type(fore) == tuple and len(fore)", "= '\\x01\\x1b[{z};2;{r};{g};{b}m\\x02' fore = color_seq.format(z=COLOR_FORE, r=c.fore.r, g=c.fore.g, b=c.fore.b) back = color_seq.format(z=COLOR_BACK, r=c.back.r, g=c.back.g,", "* line / (len(table) * 3+3)) color = CharColor(Color(0, 0, 0), back) self.elems.append(Rect(pos,", "text content records position, size, color and content only. \"\"\" def __init__(self, pos,", "-*- coding: utf-8 -*- import os import sys import itertools import numpy as", "back = color_func(1.0 - 1.0 * line / (len(table) * 3+3)) color =", "= list(itertools.chain.from_iterable(visible_parts)) visible_parts = sorted(visible_parts, key=lambda x:x[0]) # handles if no elements in", "color, u\"─\")) if \"bottom\" in sides: self.elems.append(Rect(Pos(size.row, l)+anchor, color, tick_char)) self.elems.append(Rect(anchor, color, u\"┌\"))", "__str__(self): return \"{%d, %d}\" % (self.row, self.col) class Color: def __init__(self, r, g,", "(self.r, self.g, self.b) class CharColor: def __init__(self, fore, back=None): if type(fore) == tuple", "- 1.0 * line / (len(table) * 3+3)) color = CharColor(Color(0, 0, 0),", "== tuple and len(fore) == 3: self.fore = Color(*fore) else: self.fore = fore", "import color_func def get_term_size(): rows, columns = os.popen('stty size', 'r').read().split() return int(rows), int(columns)", "each # existing bound. A_left_shaded = A_left <= B_left A_right_shaded = A_right >=", "parts. 
if A_left_dodged or A_right_dodged: # dodged return ((B_left, B_right, B_id),) elif not", "text, c): COLOR_FORE = 38 COLOR_BACK = 48 color_seq = '\\x01\\x1b[{z};2;{r};{g};{b}m\\x02' fore =", "cell] in enumerate(row): add_cell(cell, cell_anchor, Pos(row_num*3+0, col_num), True) add_cell(cell, cell_anchor, Pos(row_num*3+1, col_num)) add_cell(cell,", "self.add_empty_line(anchor) self.elems.append(Rect(anchor, color, text)) self.current_line += 2 def add_empty_line(self, pos): self.elems.append(Rect(Pos(pos.row, 0), CharColor((0,0,0)),", "g=c.fore.g, b=c.fore.b) back = color_seq.format(z=COLOR_BACK, r=c.back.r, g=c.back.g, b=c.back.b) return fore+back+text if __name__ ==", "# splitted return ((B_left, A_left, B_id),(A_right, B_right, B_id)) elif (A_left_shaded and A_right_shaded): #", "line_num, is_reset=False): \"\"\" render elements in single line \"\"\" # Find all elements", "color, u\"┌\")) self.elems.append(Rect(anchor+Pos(size.row, 0), color, u\"└\")) self.elems.append(Rect(anchor+Pos(size.row, size.col), color, u\"┘\")) self.elems.append(Rect(anchor+Pos(0, size.col), color,", "return Color(self.r + inc, self.g + inc, self.b + inc) else: raise TypeError(\"operand", "in self.elems if elem.pos.row == line_num] visible_parts = [] def visible_check((A_left, A_right, _),", "color_seq.format(z=COLOR_FORE, r=c.fore.r, g=c.fore.g, b=c.fore.b) back = color_seq.format(z=COLOR_BACK, r=c.back.r, g=c.back.g, b=c.back.b) return fore+back+text if", "B_right, B_id)) elif (A_left_shaded and A_right_shaded): # fully shaded return [] else: #", "+ inc) else: raise TypeError(\"operand type must be either 3-tuple or Color\") def", "+ \" \" + str(self.back) class Rect: \"\"\" Rect: Draw a rectangle area", "Pos(0, 2) back = color_func((cell-min_cell)/(max_cell-min_cell)) color = CharColor(back, back) * (1., 0.5) cell", "inc, self.back * inc) else: raise TypeError(\"operand type must be either 3-tuple or", "len(hist[0]) * bar_width) / 2) self.add_frame(Pos(height + 3, len(hist[0])*bar_width + 5), anchor, x_rep=3,", "r=c.fore.r, g=c.fore.g, b=c.fore.b) back = color_seq.format(z=COLOR_BACK, r=c.back.r, g=c.back.g, b=c.back.b) return fore+back+text if __name__", "max cell length cell_anchor = anchor + Pos(2, 1) for [row_num, row] in", "if type(inc) == inc and len(inc) == 3: return Color(self.r + inc[0], self.g", "== Color: return Color(self.r + inc.r, self.g + inc.g, self.b + inc.b) elif", "anchor is None: anchor = Pos(self.current_line, (self.cols - len(text)) / 2) color =", "line in range(height): for ith, val in enumerate(hist[0]): pos = Pos(line, ith*bar_width) +", "= max(hist[0]) height = 30 bar_width = 5 if anchor is None: anchor", "type(inc) == tuple and len(inc) == 3: return Color(self.r * inc[0], self.g *", "None: anchor = Pos(self.current_line, (self.cols - len(text)) / 2) color = color *", "of back # element. # 3. split: fore element splits back element into", "for elem in self.elems if elem.pos.row == line_num] visible_parts = [] def visible_check((A_left,", "TypeError(\"operand type must be either 3-tuple or 2-tuple\") elif type(inc) is float: return", "in sides: self.elems.append(Rect(Pos(0, l)+anchor, color, u\"─\")) if \"bottom\" in sides: self.elems.append(Rect(Pos(size.row, l)+anchor, color,", "split: fore element splits back element into two visible # parts. 
if A_left_dodged", "isBlank else \"%1.2f\" % cell if type(cell) is float else cell cell =", "line in range(self.rows): self.render_line(line, is_reset) def stroke(self, text, c): COLOR_FORE = 38 COLOR_BACK", "return CharColor(self.fore * inc[0], self.back * inc[1]) elif len(inc) == 3: return CharColor(self.fore", "int(g) self.b = int(b) def __add__(self, inc): if type(inc) == inc and len(inc)", "for [col_num, cell] in enumerate(row): add_cell(cell, cell_anchor, Pos(row_num*3+0, col_num), True) add_cell(cell, cell_anchor, Pos(row_num*3+1,", "COLOR_FORE = 38 COLOR_BACK = 48 color_seq = '\\x01\\x1b[{z};2;{r};{g};{b}m\\x02' fore = color_seq.format(z=COLOR_FORE, r=c.fore.r,", "pos, isBlank=False): pos = pos * (1, cell_size) + anchor + Pos(0, 2)", "+ Pos(0, 2) back = color_func((cell-min_cell)/(max_cell-min_cell)) color = CharColor(back, back) * (1., 0.5)", "or right bound of back # element. # 3. split: fore element splits", "y_tick_range=None, x_rep=None, y_rep=None, x_off=None, y_off=None): color = CharColor((255, 255, 255)) for l in", "self.back + inc) else: raise TypeError(\"operand type must be tuple\") def __mul__(self, inc):", "B_id)): # compare the left/right bound of new element with each # existing", "color, u\"┘\")) self.elems.append(Rect(anchor+Pos(0, size.col), color, u\"┐\")) def add_grid(self, table, color_func, anchor=None): cell_size =", "- len(hist[0]) * bar_width) / 2) self.add_frame(Pos(height + 3, len(hist[0])*bar_width + 5), anchor,", "else: raise TypeError(\"operand type must be either 3-tuple or 2-tuple\") elif type(inc) is", "(0.5, 1.) self.add_empty_line(anchor) self.elems.append(Rect(anchor, color, text)) self.current_line += 2 def add_empty_line(self, pos): self.elems.append(Rect(Pos(pos.row,", "shading: # 1. dodged: the fore and back element doesn't overlap # 2.", "anchor, pos, isBlank=False): pos = pos * (1, cell_size) + anchor + Pos(0,", "- val/max_val) < line: color = color_func(val/max_val) self.elems.append(Rect(pos, CharColor(color, color*2), \" \")) self.current_line", "__mul__(self, pos_time): if type(pos_time) is tuple: return Pos(self.row * pos_time[0], self.col * pos_time[1])", "the form. 
visible_parts = list(itertools.chain.from_iterable(visible_parts)) visible_parts = sorted(visible_parts, key=lambda x:x[0]) # handles if", "color = CharColor(back, back) * (1., 0.5) cell = \"\" if isBlank else", "g, b): self.r = int(r) self.g = int(g) self.b = int(b) def __add__(self,", "pos_time[0], self.col * pos_time[1]) return Pos(self.row * pos_time.row, self.col * pos_time.row) def __str__(self):", "y_rep=bar_width, y_off=0) hist_anchor = anchor + Pos(2, 3) for line in range(height): for", "== 0: tick_char = u\"├\" if \"left\" in sides: self.elems.append(Rect(Pos(l, 0)+anchor, color, tick_char))", "render(self, is_reset=False): sys.stdout.flush() sys.stdout.write(\"\\n\") for line in range(self.rows): self.render_line(line, is_reset) def stroke(self, text,", "inc) else: raise TypeError(\"operand type must be tuple\") def __str__(self): return str(self.fore) +", "else \"%1.2f\" % cell if type(cell) is float else cell cell = cell.rjust(cell_size", "None and x_rep is not None: if (l + x_off) % x_rep ==", "pos = pos * (1, cell_size) + anchor + Pos(0, 2) back =", "sides=(\"left\", \"right\", \"top\", \"bottom\"), x_tick_range=None, y_tick_range=None, x_rep=None, y_rep=None, x_off=None, y_off=None): color = CharColor((255,", "elem.color text = elem.text[part[0] - elem.pos.col : part[1] - elem.pos.col] strokes += self.stroke(text,", "+ cell + \" \")) # Add each cell into element table #", "anchor is None: anchor = Pos(self.current_line, (self.cols - len(hist[0]) * bar_width) / 2)", "(1., 0.5) cell = \"\" if isBlank else \"%1.2f\" % cell if type(cell)", "not (A_left_shaded or A_right_shaded): # splitted return ((B_left, A_left, B_id),(A_right, B_right, B_id)) elif", "y_off=0) def add_cell(cell, anchor, pos, isBlank=False): pos = pos * (1, cell_size) +", "# tuple in order to maintain the form. visible_parts = list(itertools.chain.from_iterable(visible_parts)) visible_parts =", "# existing bound. A_left_shaded = A_left <= B_left A_right_shaded = A_right >= B_right", "inc): if type(inc) == tuple: if len(inc) == 2: return CharColor(self.fore * inc[0],", "cell_anchor, Pos(row_num*3+2, col_num), True) # Add a thermometer on the right side thermo_left", "(A_left_shaded or A_right_shaded): # splitted return ((B_left, A_left, B_id),(A_right, B_right, B_id)) elif (A_left_shaded", "* bar_width) / 2) self.add_frame(Pos(height + 3, len(hist[0])*bar_width + 5), anchor, x_rep=3, x_off=0,", "self.back = Color(0,0,0) elif type(back) == tuple and len(back) == 3: self.back =", "fore if back == None : self.back = Color(0,0,0) elif type(back) == tuple", "self.current_line += len(table)*3 + 4 def add_hist(self, hist, color_func, anchor=None): max_val = max(hist[0])", "coding: utf-8 -*- import os import sys import itertools import numpy as np", "== 3: return Color(self.r * inc[0], self.g * inc[1], self.b * inc[2]) elif", "size', 'r').read().split() return int(rows), int(columns) class Pos: def __init__(self, row, col): self.row =", "elem_i, elem in enumerate(elems_inline): elem_bound = (elem.pos.col, elem.pos.col + len(elem.text), elem_i) for i,", "self.stroke(text, color) strokes += COLOR_RESET if is_reset else \"\" sys.stdout.write(strokes + COLOR_RESET) sys.stdout.write(\"\\n\")", "into element table # and calculates the max cell length cell_anchor = anchor", "color * (0.5, 1.) 
self.add_empty_line(anchor) self.elems.append(Rect(anchor, color, text)) self.current_line += 2 def add_empty_line(self,", "\" \")) self.current_line += len(table)*3 + 4 def add_hist(self, hist, color_func, anchor=None): max_val", "[] else \" \" * visible_parts[0][0] COLOR_RESET = '\\x01\\x1b[0m\\x02' for part in visible_parts:", "inc[0], self.g * inc[1], self.b * inc[2]) elif type(inc) == float: return Color(int(self.r", "color, u\"│\")) for l in range(1,size.col): tick_char = u\"─\" if y_off is not", "tuple and len(back) == 3: self.back = Color(*back) else: self.back = back def", "self.elems.append(Rect(anchor, color, u\"┌\")) self.elems.append(Rect(anchor+Pos(size.row, 0), color, u\"└\")) self.elems.append(Rect(anchor+Pos(size.row, size.col), color, u\"┘\")) self.elems.append(Rect(anchor+Pos(0, size.col),", "= len(new_cell) cell_size += 2 if anchor is None: anchor = Pos(self.current_line, (self.cols", "= A_right < B_left A_right_dodged = A_left > B_right # Four cases of", "dodged return ((B_left, B_right, B_id),) elif not (A_left_shaded or A_right_shaded): # splitted return", "two visible # parts. if A_left_dodged or A_right_dodged: # dodged return ((B_left, B_right,", "fore = color_seq.format(z=COLOR_FORE, r=c.fore.r, g=c.fore.g, b=c.fore.b) back = color_seq.format(z=COLOR_BACK, r=c.back.r, g=c.back.g, b=c.back.b) return", "as np from color_schemes import color_func def get_term_size(): rows, columns = os.popen('stty size',", "10 for line in range(1, len(table) * 3 + 6): pos = Pos(line,", "color, u\"└\")) self.elems.append(Rect(anchor+Pos(size.row, size.col), color, u\"┘\")) self.elems.append(Rect(anchor+Pos(0, size.col), color, u\"┐\")) def add_grid(self, table,", "+ pos.row, self.col + pos.col) def __mul__(self, pos_time): if type(pos_time) is tuple: return", "len(hist[0])*bar_width + 5), anchor, x_rep=3, x_off=0, y_rep=bar_width, y_off=0) hist_anchor = anchor + Pos(2,", "== tuple: if len(inc) == 2: return CharColor(self.fore + inc[0], self.back + inc[1])", "and calculate max length for row in table: for cell in row: try:", "range(size.row+1): self.add_empty_line(Pos(l, 0) + anchor) tick_char = u\"│\" if x_off is not None", "CharColor(self.fore + inc, self.back + inc) else: raise TypeError(\"operand type must be either", "cell = \"\" if isBlank else \"%1.2f\" % cell if type(cell) is float", "anchor + Pos(0, 2) back = color_func((cell-min_cell)/(max_cell-min_cell)) color = CharColor(back, back) * (1.,", "0: tick_char = u\"┴\" if \"top\" in sides: self.elems.append(Rect(Pos(0, l)+anchor, color, u\"─\")) if", "cell_size = len(new_cell) cell_size += 2 if anchor is None: anchor = Pos(self.current_line,", "inc[2]) elif type(inc) == Color: return Color(self.r + inc.r, self.g + inc.g, self.b", "cols = get_term_size() # graphic elements hold by Canvas. 
# src/congram.py
# -*- coding: utf-8 -*-
import os
import sys
import itertools

import numpy as np

from color_schemes import color_func


def get_term_size():
    # terminal size in character cells, via stty
    rows, columns = os.popen('stty size', 'r').read().split()
    return int(rows), int(columns)


class Pos:
    # a (row, col) position in character cells
    def __init__(self, row, col):
        self.row = row
        self.col = col

    def __add__(self, pos):
        return Pos(self.row + pos.row, self.col + pos.col)

    def __mul__(self, pos_time):
        if type(pos_time) is tuple:
            return Pos(self.row * pos_time[0], self.col * pos_time[1])
        return Pos(self.row * pos_time.row, self.col * pos_time.row)

    def __str__(self):
        return "{%d, %d}" % (self.row, self.col)


class Color:
    # a 24-bit RGB colour
    def __init__(self, r, g, b):
        self.r = int(r)
        self.g = int(g)
        self.b = int(b)

    def __add__(self, inc):
        if type(inc) == tuple and len(inc) == 3:
            return Color(self.r + inc[0], self.g + inc[1], self.b + inc[2])
        elif type(inc) == Color:
            return Color(self.r + inc.r, self.g + inc.g, self.b + inc.b)
        elif type(inc) == int:
            return Color(self.r + inc, self.g + inc, self.b + inc)
        else:
            raise TypeError("operand type must be either 3-tuple or Color")

    def __mul__(self, inc):
        if type(inc) == tuple and len(inc) == 3:
            return Color(self.r * inc[0], self.g * inc[1], self.b * inc[2])
        elif type(inc) == float:
            return Color(int(self.r * inc), int(self.g * inc), int(self.b * inc))
        else:
            raise TypeError("operand type must be either 3-tuple or int")

    def __str__(self):
        return "{%d, %d, %d}" % (self.r, self.g, self.b)


class CharColor:
    # foreground/background colour pair for one character cell
    def __init__(self, fore, back=None):
        if type(fore) == tuple and len(fore) == 3:
            self.fore = Color(*fore)
        else:
            self.fore = fore
        if back == None:
            self.back = Color(0, 0, 0)
        elif type(back) == tuple and len(back) == 3:
            self.back = Color(*back)
        else:
            self.back = back

    def __add__(self, inc):
        if type(inc) == tuple:
            if len(inc) == 2:
                return CharColor(self.fore + inc[0], self.back + inc[1])
            elif len(inc) == 3:
                return CharColor(self.fore + inc, self.back + inc)
            else:
                raise TypeError("operand type must be either 3-tuple or 2-tuple")
        elif type(inc) is int:
            return CharColor(self.fore + inc, self.back + inc)
        else:
            raise TypeError("operand type must be tuple")

    def __mul__(self, inc):
        if type(inc) == tuple:
            if len(inc) == 2:
                return CharColor(self.fore * inc[0], self.back * inc[1])
            elif len(inc) == 3:
                return CharColor(self.fore * inc, self.back * inc)
            else:
                raise TypeError("operand type must be either 3-tuple or 2-tuple")
        elif type(inc) is float:
            return CharColor(self.fore * inc, self.back * inc)
        else:
            raise TypeError("operand type must be tuple")

    def __str__(self):
        return str(self.fore) + " " + str(self.back)


class Rect:
    """ Rect: Draw a rectangle area with given fore/back color and text content.
        Records position, size, color and content only. """
    def __init__(self, pos, color, text):
        self.pos = pos
        self.color = color
        self.text = text


class Canvas:
    rows, cols = get_term_size()
    # graphic elements held by Canvas
    elems = []
    # for successively adding elements
    current_line = 0

    def add_text(self, text, color, anchor=None):
        if anchor is None:
            anchor = Pos(self.current_line, (self.cols - len(text)) / 2)
        color = color * (0.5, 1.)
        self.add_empty_line(anchor)
        self.elems.append(Rect(anchor, color, text))
        self.current_line += 2

    def add_empty_line(self, pos):
        self.elems.append(Rect(Pos(pos.row, 0), CharColor((0, 0, 0)), " " * self.cols))

    def add_frame(self, size, anchor, sides=("left", "right", "top", "bottom"),
                  x_tick_range=None, y_tick_range=None,
                  x_rep=None, y_rep=None, x_off=None, y_off=None):
        color = CharColor((255, 255, 255))
        for l in range(size.row + 1):
            self.add_empty_line(Pos(l, 0) + anchor)
            tick_char = u"│"
            if x_off is not None and x_rep is not None:
                if (l + x_off) % x_rep == 0:
                    tick_char = u"├"
            if "left" in sides:
                self.elems.append(Rect(Pos(l, 0) + anchor, color, tick_char))
            if "right" in sides:
                self.elems.append(Rect(Pos(l, size.col) + anchor, color, u"│"))
        for l in range(1, size.col):
            tick_char = u"─"
            if y_off is not None and y_rep is not None:
                if (l + y_off) % y_rep == 0:
                    tick_char = u"┴"
            if "top" in sides:
                self.elems.append(Rect(Pos(0, l) + anchor, color, u"─"))
            if "bottom" in sides:
                self.elems.append(Rect(Pos(size.row, l) + anchor, color, tick_char))
        self.elems.append(Rect(anchor, color, u"┌"))
        self.elems.append(Rect(anchor + Pos(size.row, 0), color, u"└"))
        self.elems.append(Rect(anchor + Pos(size.row, size.col), color, u"┘"))
        self.elems.append(Rect(anchor + Pos(0, size.col), color, u"┐"))

    def add_grid(self, table, color_func, anchor=None):
        cell_size = 0
        min_cell = min([min(c) for c in table])
        max_cell = max([max(c) for c in table])
        # Get reformed string and calculate max length
        for row in table:
            for cell in row:
                try:
                    new_cell = "%1.2f" % cell
                except TypeError:
                    new_cell = cell
                if cell_size < len(new_cell):
                    cell_size = len(new_cell)
        cell_size += 2
        if anchor is None:
            anchor = Pos(self.current_line, (self.cols - len(table[0]) * cell_size - 7) / 3)
        self.add_frame(Pos(len(table) * 3 + 3, len(table[0]) * cell_size + 5), anchor,
                       x_rep=3, x_off=0, y_rep=cell_size, y_off=0)

        def add_cell(cell, anchor, pos, isBlank=False):
            pos = pos * (1, cell_size) + anchor + Pos(0, 2)
            back = color_func((cell - min_cell) / (max_cell - min_cell))
            color = CharColor(back, back) * (1., 0.5)
            cell = "" if isBlank else "%1.2f" % cell if type(cell) is float else cell
            cell = cell.rjust(cell_size - 2)
            self.elems.append(Rect(pos, color, " " + cell + " "))

        # Add each cell into the element table
        # and calculate the max cell length
        cell_anchor = anchor + Pos(2, 1)
        for [row_num, row] in enumerate(table):
            for [col_num, cell] in enumerate(row):
                add_cell(cell, cell_anchor, Pos(row_num * 3 + 0, col_num), True)
                add_cell(cell, cell_anchor, Pos(row_num * 3 + 1, col_num))
                add_cell(cell, cell_anchor, Pos(row_num * 3 + 2, col_num), True)

        # Add a thermometer on the right side
        thermo_left = len(table[0]) * cell_size + 10
        for line in range(1, len(table) * 3 + 6):
            pos = Pos(line, thermo_left) + anchor
            back = color_func(1.0 - 1.0 * line / (len(table) * 3 + 3))
            color = CharColor(Color(0, 0, 0), back)
            self.elems.append(Rect(pos, color, " "))
        self.current_line += len(table) * 3 + 4

    def add_hist(self, hist, color_func, anchor=None):
        max_val = max(hist[0])
        height = 30
        bar_width = 5
        if anchor is None:
            anchor = Pos(self.current_line, (self.cols - len(hist[0]) * bar_width) / 2)
        self.add_frame(Pos(height + 3, len(hist[0]) * bar_width + 5), anchor,
                       x_rep=3, x_off=0, y_rep=bar_width, y_off=0)
        hist_anchor = anchor + Pos(2, 3)
        for line in range(height):
            for ith, val in enumerate(hist[0]):
                pos = Pos(line, ith * bar_width) + hist_anchor
                if height * (1 - val / max_val) < line:
                    color = color_func(val / max_val)
                    self.elems.append(Rect(pos, CharColor(color, color * 2), " "))
        self.current_line += 30

    def render_line(self, line_num, is_reset=False):
        """ render elements in single line """
        # Find all elements to be rendered in the current line
        elems_inline = [elem for elem in self.elems if elem.pos.row == line_num]
        visible_parts = []

        def visible_check((A_left, A_right, _), (B_left, B_right, B_id)):
            # compare the left/right bound of the new element with each
            # existing bound.
            A_left_shaded = A_left <= B_left
            A_right_shaded = A_right >= B_right
            A_left_dodged = A_right < B_left
            A_right_dodged = A_left > B_right
            # Four cases of shading:
            #   1. dodged: the fore and back element don't overlap
            #   2. shaded: fore element shades the left or right bound of the
            #      back element.
            #   3. split: fore element splits the back element into two visible
            #      parts.
            if A_left_dodged or A_right_dodged:
                # dodged
                return ((B_left, B_right, B_id),)
            elif not (A_left_shaded or A_right_shaded):
                # split
                return ((B_left, A_left, B_id), (A_right, B_right, B_id))
            elif (A_left_shaded and A_right_shaded):
                # fully shaded
                return []
            else:
                # partially shaded
                if A_left_shaded:
                    return ((A_right, B_right, B_id),)
                if A_right_shaded:
                    return ((B_left, A_left, B_id),)

        for elem_i, elem in enumerate(elems_inline):
            elem_bound = (elem.pos.col, elem.pos.col + len(elem.text), elem_i)
            for i, part in enumerate(visible_parts):
                visible_parts[i] = visible_check(elem_bound, part)
            visible_parts.append((elem_bound,))
            # the flatten operation by itertools.chain flattens both list and
            # tuple (and all iterables), thus we have to coat the new bound with
            # one more tuple in order to maintain the form.
            visible_parts = list(itertools.chain.from_iterable(visible_parts))

        visible_parts = sorted(visible_parts, key=lambda x: x[0])
        # handles the case of no elements in this line
        strokes = "" if visible_parts == [] else " " * visible_parts[0][0]
        COLOR_RESET = '\x01\x1b[0m\x02'
        for part in visible_parts:
            elem = elems_inline[part[2]]
            color = elem.color
            text = elem.text[part[0] - elem.pos.col : part[1] - elem.pos.col]
            strokes += self.stroke(text, color)
            strokes += COLOR_RESET if is_reset else ""
        sys.stdout.write(strokes + COLOR_RESET)
        sys.stdout.write("\n")

    def render(self, is_reset=False):
        sys.stdout.flush()
        sys.stdout.write("\n")
        for line in range(self.rows):
            self.render_line(line, is_reset)

    def stroke(self, text, c):
        COLOR_FORE = 38
        COLOR_BACK = 48
        color_seq = '\x01\x1b[{z};2;{r};{g};{b}m\x02'
        fore = color_seq.format(z=COLOR_FORE, r=c.fore.r, g=c.fore.g, b=c.fore.b)
        back = color_seq.format(z=COLOR_BACK, r=c.back.r, g=c.back.g, b=c.back.b)
        return fore + back + text


if __name__ == "__main__":
    c = Canvas()
    grid = np.random.random_sample((7, 10))
    hist = np.random.random_sample((15, 1))
    c.add_text("This is a heatmap example", CharColor(color_func["Plum"](0.9)))
    c.add_grid(grid.tolist(), color_func["Plum"])
    c.add_text("This is a histogram example", CharColor(color_func["BlueGreenYellow"](0.9)))
    #c.add_hist(grid.tolist(), color_func["BlueGreenYellow"])
[ "sys webui_root = \"webui/\" points = [] for i in range(1, len(sys.argv)): points.append(sys.argv[i])", "matplotlib # prevents pyplot from trying to connect to x windowing matplotlib.use('Agg') import", "<reponame>twmarshall/tbd import matplotlib # prevents pyplot from trying to connect to x windowing", "plt import sys webui_root = \"webui/\" points = [] for i in range(1,", "as plt import sys webui_root = \"webui/\" points = [] for i in", "connect to x windowing matplotlib.use('Agg') import matplotlib.pyplot as plt import sys webui_root =", "import matplotlib # prevents pyplot from trying to connect to x windowing matplotlib.use('Agg')", "\"webui/\" points = [] for i in range(1, len(sys.argv)): points.append(sys.argv[i]) plt.plot(points) plt.ylabel('some numbers')", "[] for i in range(1, len(sys.argv)): points.append(sys.argv[i]) plt.plot(points) plt.ylabel('some numbers') plt.savefig(webui_root + \"tasks.png\")", "to connect to x windowing matplotlib.use('Agg') import matplotlib.pyplot as plt import sys webui_root", "to x windowing matplotlib.use('Agg') import matplotlib.pyplot as plt import sys webui_root = \"webui/\"", "webui_root = \"webui/\" points = [] for i in range(1, len(sys.argv)): points.append(sys.argv[i]) plt.plot(points)", "import matplotlib.pyplot as plt import sys webui_root = \"webui/\" points = [] for", "trying to connect to x windowing matplotlib.use('Agg') import matplotlib.pyplot as plt import sys", "from trying to connect to x windowing matplotlib.use('Agg') import matplotlib.pyplot as plt import", "= [] for i in range(1, len(sys.argv)): points.append(sys.argv[i]) plt.plot(points) plt.ylabel('some numbers') plt.savefig(webui_root +", "pyplot from trying to connect to x windowing matplotlib.use('Agg') import matplotlib.pyplot as plt", "matplotlib.use('Agg') import matplotlib.pyplot as plt import sys webui_root = \"webui/\" points = []", "matplotlib.pyplot as plt import sys webui_root = \"webui/\" points = [] for i", "# prevents pyplot from trying to connect to x windowing matplotlib.use('Agg') import matplotlib.pyplot", "prevents pyplot from trying to connect to x windowing matplotlib.use('Agg') import matplotlib.pyplot as", "windowing matplotlib.use('Agg') import matplotlib.pyplot as plt import sys webui_root = \"webui/\" points =", "= \"webui/\" points = [] for i in range(1, len(sys.argv)): points.append(sys.argv[i]) plt.plot(points) plt.ylabel('some", "points = [] for i in range(1, len(sys.argv)): points.append(sys.argv[i]) plt.plot(points) plt.ylabel('some numbers') plt.savefig(webui_root", "import sys webui_root = \"webui/\" points = [] for i in range(1, len(sys.argv)):", "x windowing matplotlib.use('Agg') import matplotlib.pyplot as plt import sys webui_root = \"webui/\" points" ]
[ "open(\"input/5.txt\") as f: #with open(\"input/5.test\") as f: poly = f.read().strip() def is_reactable(x,y): return", "f: #with open(\"input/5.test\") as f: poly = f.read().strip() def is_reactable(x,y): return x.lower()==y.lower() and", "# tail if is_reactable(p, q): result = result[:-1] # remove tail else: result", "q = result[-1] # tail if is_reactable(p, q): result = result[:-1] # remove", "return x.lower()==y.lower() and x.islower() != y.islower() assert(not is_reactable(\"a\", \"a\")) assert(not is_reactable(\"a\", \"B\")) assert(is_reactable(\"a\",", "as f: #with open(\"input/5.test\") as f: poly = f.read().strip() def is_reactable(x,y): return x.lower()==y.lower()", "open(\"input/5.test\") as f: poly = f.read().strip() def is_reactable(x,y): return x.lower()==y.lower() and x.islower() !=", "continue q = result[-1] # tail if is_reactable(p, q): result = result[:-1] #", "for p in poly: if len(result)==0: result += p continue q = result[-1]", "is_reactable(p, q): result = result[:-1] # remove tail else: result += p print(len(result))", "x.islower() != y.islower() assert(not is_reactable(\"a\", \"a\")) assert(not is_reactable(\"a\", \"B\")) assert(is_reactable(\"a\", \"A\")) print(len(poly)) result", "y.islower() assert(not is_reactable(\"a\", \"a\")) assert(not is_reactable(\"a\", \"B\")) assert(is_reactable(\"a\", \"A\")) print(len(poly)) result = \"\"", "\"B\")) assert(is_reactable(\"a\", \"A\")) print(len(poly)) result = \"\" for p in poly: if len(result)==0:", "assert(is_reactable(\"a\", \"A\")) print(len(poly)) result = \"\" for p in poly: if len(result)==0: result", "if is_reactable(p, q): result = result[:-1] # remove tail else: result += p", "x.lower()==y.lower() and x.islower() != y.islower() assert(not is_reactable(\"a\", \"a\")) assert(not is_reactable(\"a\", \"B\")) assert(is_reactable(\"a\", \"A\"))", "is_reactable(\"a\", \"B\")) assert(is_reactable(\"a\", \"A\")) print(len(poly)) result = \"\" for p in poly: if", "= \"\" for p in poly: if len(result)==0: result += p continue q", "\"a\")) assert(not is_reactable(\"a\", \"B\")) assert(is_reactable(\"a\", \"A\")) print(len(poly)) result = \"\" for p in", "and x.islower() != y.islower() assert(not is_reactable(\"a\", \"a\")) assert(not is_reactable(\"a\", \"B\")) assert(is_reactable(\"a\", \"A\")) print(len(poly))", "len(result)==0: result += p continue q = result[-1] # tail if is_reactable(p, q):", "result[-1] # tail if is_reactable(p, q): result = result[:-1] # remove tail else:", "+= p continue q = result[-1] # tail if is_reactable(p, q): result =", "\"\" for p in poly: if len(result)==0: result += p continue q =", "poly: if len(result)==0: result += p continue q = result[-1] # tail if", "= result[-1] # tail if is_reactable(p, q): result = result[:-1] # remove tail", "is_reactable(\"a\", \"a\")) assert(not is_reactable(\"a\", \"B\")) assert(is_reactable(\"a\", \"A\")) print(len(poly)) result = \"\" for p", "as f: poly = f.read().strip() def is_reactable(x,y): return x.lower()==y.lower() and x.islower() != y.islower()", "\"A\")) print(len(poly)) result = \"\" for p in poly: if len(result)==0: result +=", "assert(not is_reactable(\"a\", \"a\")) assert(not is_reactable(\"a\", \"B\")) assert(is_reactable(\"a\", \"A\")) print(len(poly)) result = \"\" for", "f: poly = f.read().strip() def is_reactable(x,y): return x.lower()==y.lower() and x.islower() != y.islower() assert(not", "poly = f.read().strip() def is_reactable(x,y): return x.lower()==y.lower() and x.islower() != y.islower() assert(not is_reactable(\"a\",", "result 
+= p continue q = result[-1] # tail if is_reactable(p, q): result", "with open(\"input/5.txt\") as f: #with open(\"input/5.test\") as f: poly = f.read().strip() def is_reactable(x,y):", "= f.read().strip() def is_reactable(x,y): return x.lower()==y.lower() and x.islower() != y.islower() assert(not is_reactable(\"a\", \"a\"))", "in poly: if len(result)==0: result += p continue q = result[-1] # tail", "assert(not is_reactable(\"a\", \"B\")) assert(is_reactable(\"a\", \"A\")) print(len(poly)) result = \"\" for p in poly:", "f.read().strip() def is_reactable(x,y): return x.lower()==y.lower() and x.islower() != y.islower() assert(not is_reactable(\"a\", \"a\")) assert(not", "is_reactable(x,y): return x.lower()==y.lower() and x.islower() != y.islower() assert(not is_reactable(\"a\", \"a\")) assert(not is_reactable(\"a\", \"B\"))", "p in poly: if len(result)==0: result += p continue q = result[-1] #", "#with open(\"input/5.test\") as f: poly = f.read().strip() def is_reactable(x,y): return x.lower()==y.lower() and x.islower()", "result = \"\" for p in poly: if len(result)==0: result += p continue", "if len(result)==0: result += p continue q = result[-1] # tail if is_reactable(p,", "tail if is_reactable(p, q): result = result[:-1] # remove tail else: result +=", "!= y.islower() assert(not is_reactable(\"a\", \"a\")) assert(not is_reactable(\"a\", \"B\")) assert(is_reactable(\"a\", \"A\")) print(len(poly)) result =", "p continue q = result[-1] # tail if is_reactable(p, q): result = result[:-1]", "def is_reactable(x,y): return x.lower()==y.lower() and x.islower() != y.islower() assert(not is_reactable(\"a\", \"a\")) assert(not is_reactable(\"a\",", "print(len(poly)) result = \"\" for p in poly: if len(result)==0: result += p" ]
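The overlapping fragments in the row above appear to come from a short "polymer reaction" script (Advent of Code, day 5 style). The sketch below is a plausible reconstruction assembled from those fragments; the input path `input/5.txt` and the helper name `is_reactable` are taken from the fragments themselves, while the exact layout and indentation are assumptions.

```python
# Plausible reconstruction (assumed layout) of the script the fragments describe.
with open("input/5.txt") as f:
    # with open("input/5.test") as f:
    poly = f.read().strip()

def is_reactable(x, y):
    # Two units react when they are the same letter in opposite cases.
    return x.lower() == y.lower() and x.islower() != y.islower()

assert not is_reactable("a", "a")
assert not is_reactable("a", "B")
assert is_reactable("a", "A")

print(len(poly))

result = ""
for p in poly:
    if len(result) == 0:
        result += p
        continue
    q = result[-1]  # tail of the reduced polymer so far
    if is_reactable(p, q):
        result = result[:-1]  # remove tail: the pair annihilates
    else:
        result += p

print(len(result))
```

The loop treats `result` as a stack: each incoming unit either cancels the unit on top (when the pair is reactable) or is pushed, so the full reduction happens in a single left-to-right pass.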
[ "by Django 2.1.12 on 2019-09-11 05:50 from django.db import migrations class Migration(migrations.Migration): dependencies", "on 2019-09-11 05:50 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core',", "django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core', '0010_auto_20190910_1022'), ] operations =", "class Migration(migrations.Migration): dependencies = [ ('core', '0010_auto_20190910_1022'), ] operations = [ migrations.AlterUniqueTogether( name='product',", "[ ('core', '0010_auto_20190910_1022'), ] operations = [ migrations.AlterUniqueTogether( name='product', unique_together={('wishlist', 'product_id')}, ), ]", "05:50 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core', '0010_auto_20190910_1022'), ]", "Django 2.1.12 on 2019-09-11 05:50 from django.db import migrations class Migration(migrations.Migration): dependencies =", "dependencies = [ ('core', '0010_auto_20190910_1022'), ] operations = [ migrations.AlterUniqueTogether( name='product', unique_together={('wishlist', 'product_id')},", "migrations class Migration(migrations.Migration): dependencies = [ ('core', '0010_auto_20190910_1022'), ] operations = [ migrations.AlterUniqueTogether(", "Migration(migrations.Migration): dependencies = [ ('core', '0010_auto_20190910_1022'), ] operations = [ migrations.AlterUniqueTogether( name='product', unique_together={('wishlist',", "2019-09-11 05:50 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core', '0010_auto_20190910_1022'),", "<gh_stars>0 # Generated by Django 2.1.12 on 2019-09-11 05:50 from django.db import migrations", "= [ ('core', '0010_auto_20190910_1022'), ] operations = [ migrations.AlterUniqueTogether( name='product', unique_together={('wishlist', 'product_id')}, ),", "Generated by Django 2.1.12 on 2019-09-11 05:50 from django.db import migrations class Migration(migrations.Migration):", "# Generated by Django 2.1.12 on 2019-09-11 05:50 from django.db import migrations class", "2.1.12 on 2019-09-11 05:50 from django.db import migrations class Migration(migrations.Migration): dependencies = [", "import migrations class Migration(migrations.Migration): dependencies = [ ('core', '0010_auto_20190910_1022'), ] operations = [", "from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core', '0010_auto_20190910_1022'), ] operations" ]
[ "link=[], ) # loop over authors logger.info('Getting brainglobe papers') for author_n, author_id in", "{paper[\"title\"]} to avoid duplicates') continue # skip duplicates logger.debug(f'Found brainglobe paper: \"{paper[\"title\"]}\" @", "logger from rich import print from rich.table import Table from mdutils.mdutils import MdUtils", "paper['paperId'] in citing_brainglobe['id']: continue # avoid duplicates citing_brainglobe['id'].append(paper['paperId']) citing_brainglobe['year'].append(str(paper['year'])) citing_brainglobe['title'].append(paper['title']) citing_brainglobe['authors'].append([auth['name'] for auth", "= ( '34308754', # <NAME> '3853277', # <NAME> '8668066', # <NAME> ) KEYWORDS", "for adding_year in years: mdFile.new_header(level=2, title=adding_year) # add papers for n in range(len(citations['id'])):", "using brainglobe's tools ''' AUTHORS = ( '34308754', # <NAME> '3853277', # <NAME>", "lines at top of file with open('_pages/references.md', 'r') as fin: content = fin.read()", "title: \"References\" --- \"\"\") mdFile.new_header(level=1, title='Papers citing BrainGlobe tools ') years = sorted(set(citations['year']))", "# create markdown file mdFile = MdUtils(file_name='_pages/references.md') # add metadata & header mdFile.write(text=\"\"\"", "for author {author_n+1}/{len(AUTHORS)}') author = sch.author(author_id) logger.debug(f'Found {len(author[\"papers\"])} papers for {author[\"name\"]}') if not", "in KEYWORDS if kw in paper['abstract'].lower()] # add it to the list of", "Searches google scholar for papers using brainglobe's tools ''' AUTHORS = ( '34308754',", "('brainglobe', 'brainrender', 'cellfinder', 'brainreg') def fetch_citations(): ''' Fetches citations semantic scholar, for each", "[] brainglobe_papers = dict( id = [], year = [], title = [],", "= dict( id = [], year = [], title = [], authors =", "logger.debug('Updating markdown file') # create markdown file mdFile = MdUtils(file_name='_pages/references.md') # add metadata", "### [The best neuroscience stories from April 2020](https://www.scientifica.uk.com/neurowire/the-best-neuroscience-stories-from-april-2020) _NeuroWire (Scientifica), April 2020_ \"\"\")", "Centre Blog, March 2021_ ### [Cellfinder: Harnessing the power of deep learning to", "for paper_citations in citations: for paper in paper_citations: if paper['paperId'] in citing_brainglobe['id']: continue", "2020](https://www.scientifica.uk.com/neurowire/the-best-neuroscience-stories-from-april-2020) _NeuroWire (Scientifica), April 2020_ \"\"\") # save mdFile.create_md_file() # remove extra empty", "logger.debug(f'Added {added}/{len(author[\"papers\"])} papers for {author[\"name\"]}') logger.info(f'Found {len(brainglobe_papers[\"id\"])} brainglobe papers') logger.info('Getting papers citing our", "len(author.keys()): raise ValueError('Could not fetch author data, probably an API timeout error, wait", "is None: logger.debug(f' skipping paper {paper[\"title\"]} because it has not abstract') continue matched_keywords", "[Using deep learning to aid 3D cell detection in whole brain microscopy images](https://www.sainsburywellcome.org/web/blog/using-deep-learning-aid-3d-cell-detection-whole-brain-microscopy-images)", "Blog, June 2021_ ### [Brainrender: visualising brain data in 3D](https://www.sainsburywellcome.org/web/blog/brainrender-visualising-brain-data-3d) _Sainsbury Wellcome Centre", "all publications and only keep the ones relevant for brainglobe. 
Then, use these", "year = [], title = [], authors = [], link=[], ) # loop", "March 2021_ ### [Cellfinder: Harnessing the power of deep learning to map the", "google scholar for papers using brainglobe's tools ''' AUTHORS = ( '34308754', #", "visualising brain data in 3D](https://www.sainsburywellcome.org/web/blog/brainrender-visualising-brain-data-3d) _Sainsbury Wellcome Centre Blog, March 2021_ ### [Cellfinder:", "scholar, for each author in the list get all publications and only keep", "**citing_brainglobe} def print_citations(citations): ''' prints a list of citations as a rich tble", "file mdFile = MdUtils(file_name='_pages/references.md') # add metadata & header mdFile.write(text=\"\"\" --- permalink: /references", "<NAME> '3853277', # <NAME> '8668066', # <NAME> ) KEYWORDS = ('brainglobe', 'brainrender', 'cellfinder',", "enumerate(AUTHORS): added = 0 logger.debug(f'Fetching for author {author_n+1}/{len(AUTHORS)}') author = sch.author(author_id) logger.debug(f'Found {len(author[\"papers\"])}", "'w') as fout: fout.write(content.replace('\\n\\n\\n\\n', '')) if __name__ == '__main__': citations = fetch_citations() #", "'34308754', # <NAME> '3853277', # <NAME> '8668066', # <NAME> ) KEYWORDS = ('brainglobe',", "over papers for paper in author['papers']: paper = sch.paper(paper['paperId']) if not paper or", "'8668066', # <NAME> ) KEYWORDS = ('brainglobe', 'brainrender', 'cellfinder', 'brainreg') def fetch_citations(): '''", "papers using brainglobe's tools ''' AUTHORS = ( '34308754', # <NAME> '3853277', #", "not abstract') continue matched_keywords = [kw for kw in KEYWORDS if kw in", "Centre Blog, June 2021_ ### [Brainrender: visualising brain data in 3D](https://www.sainsburywellcome.org/web/blog/brainrender-visualising-brain-data-3d) _Sainsbury Wellcome", "|{paper[\"numCitedBy\"]}| citations') brainglobe_papers['id'].append(paper['corpusId']) brainglobe_papers['year'].append(str(paper['year'])) brainglobe_papers['authors'].append([auth['name'] for auth in paper['authors']]) brainglobe_papers['title'].append(paper['title']) brainglobe_papers['link'].append(paper['url']) citations.append(paper['citations']) added", "else: logger.debug(f'Paper NOT belonging to brainglobe: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations') logger.debug(f'Added", "to map the brain](https://www.sainsburywellcome.org/web/blog/cellfinder-harnessing-power-deep-learning-map-brain) _Sainsbury Wellcome Centre Blog, April 2020_ ### [The best", "author_n, author_id in enumerate(AUTHORS): added = 0 logger.debug(f'Fetching for author {author_n+1}/{len(AUTHORS)}') author =", "data in 3D](https://www.sainsburywellcome.org/web/blog/brainrender-visualising-brain-data-3d) _Sainsbury Wellcome Centre Blog, March 2021_ ### [Cellfinder: Harnessing the", "in whole brain microscopy images](https://www.sainsburywellcome.org/web/blog/using-deep-learning-aid-3d-cell-detection-whole-brain-microscopy-images) _Sainsbury Wellcome Centre Blog, June 2021_ ### [Brainrender:", "in 3D](https://www.sainsburywellcome.org/web/blog/brainrender-visualising-brain-data-3d) _Sainsbury Wellcome Centre Blog, March 2021_ ### [Cellfinder: Harnessing the power", "BrainGlobe tools ') years = sorted(set(citations['year'])) for adding_year in years: mdFile.new_header(level=2, title=adding_year) #", "loop over authors logger.info('Getting brainglobe papers') for author_n, author_id in enumerate(AUTHORS): added =", "rich tble ''' tb = Table(box=None, header_style=f'bold {pink}') tb.add_column('Year', justify='right', 
style='dim') tb.add_column('Title', style=blue_light)", "''' tb = Table(box=None, header_style=f'bold {pink}') tb.add_column('Year', justify='right', style='dim') tb.add_column('Title', style=blue_light) tb.add_column('Authors') for", "range(len(citations['id'])): year = citations['year'][n] link = citations['link'][n] if year != adding_year: continue mdFile.new_header(level=3,", "with open('_pages/references.md', 'r') as fin: content = fin.read() with open('_pages/references.md', 'w') as fout:", "in citing_brainglobe['id']: continue # avoid duplicates citing_brainglobe['id'].append(paper['paperId']) citing_brainglobe['year'].append(str(paper['year'])) citing_brainglobe['title'].append(paper['title']) citing_brainglobe['authors'].append([auth['name'] for auth in", "skip duplicates logger.debug(f'Found brainglobe paper: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations') brainglobe_papers['id'].append(paper['corpusId']) brainglobe_papers['year'].append(str(paper['year']))", "in author['papers']: paper = sch.paper(paper['paperId']) if not paper or paper['abstract'] is None: logger.debug(f'", "for author_n, author_id in enumerate(AUTHORS): added = 0 logger.debug(f'Fetching for author {author_n+1}/{len(AUTHORS)}') author", "style=blue_light) tb.add_column('Authors') for n in range(len(citations['id'])): tb.add_row( citations['year'][n], citations['title'][n], ', '.join(citations['authors'][n]), ) print(tb)", "= sch.author(author_id) logger.debug(f'Found {len(author[\"papers\"])} papers for {author[\"name\"]}') if not len(author.keys()): raise ValueError('Could not", "blue_light ''' Searches google scholar for papers using brainglobe's tools ''' AUTHORS =", "import print from rich.table import Table from mdutils.mdutils import MdUtils import semanticscholar as", "tb.add_column('Title', style=blue_light) tb.add_column('Authors') for n in range(len(citations['id'])): tb.add_row( citations['year'][n], citations['title'][n], ', '.join(citations['authors'][n]), )", "learning to aid 3D cell detection in whole brain microscopy images](https://www.sainsburywellcome.org/web/blog/using-deep-learning-aid-3d-cell-detection-whole-brain-microscopy-images) _Sainsbury Wellcome", "'.join(citations['authors'][n]), ) print(tb) def make_citations_markdown(citations): ''' Replaces ./_pages/references.md to update with the most", "Coders are Joining the napari Community](https://cziscience.medium.com/why-these-python-coders-are-joining-the-napari-community-c0af6bb6ee3a) _Chan Zuckerberg Science Initiative (Medium), June 2021_", "papers') logger.info('Getting papers citing our work') for paper_citations in citations: for paper in", "author in the list get all publications and only keep the ones relevant", "publications and only keep the ones relevant for brainglobe. Then, use these publications", "{**brainglobe_papers, **citing_brainglobe} def print_citations(citations): ''' prints a list of citations as a rich", "') years = sorted(set(citations['year'])) for adding_year in years: mdFile.new_header(level=2, title=adding_year) # add papers", "the ones relevant for brainglobe. 
Then, use these publications to find papers citing", "aid 3D cell detection in whole brain microscopy images](https://www.sainsburywellcome.org/web/blog/using-deep-learning-aid-3d-cell-detection-whole-brain-microscopy-images) _Sainsbury Wellcome Centre Blog,", "list of brainglobe papers if matched_keywords: if paper['corpusId'] in brainglobe_papers['id']: logger.debug(f' skipping paper:", "# loop over authors logger.info('Getting brainglobe papers') for author_n, author_id in enumerate(AUTHORS): added", "MdUtils import semanticscholar as sch from myterial import pink, blue_light ''' Searches google", "metadata & header mdFile.write(text=\"\"\" --- permalink: /references author_profile: true title: \"References\" --- \"\"\")", "_Sainsbury Wellcome Centre Blog, April 2020_ ### [The best neuroscience stories from April", "# <NAME> '8668066', # <NAME> ) KEYWORDS = ('brainglobe', 'brainrender', 'cellfinder', 'brainreg') def", "brainglobe papers if matched_keywords: if paper['corpusId'] in brainglobe_papers['id']: logger.debug(f' skipping paper: {paper[\"title\"]} to", "MdUtils(file_name='_pages/references.md') # add metadata & header mdFile.write(text=\"\"\" --- permalink: /references author_profile: true title:", "\"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations') brainglobe_papers['id'].append(paper['corpusId']) brainglobe_papers['year'].append(str(paper['year'])) brainglobe_papers['authors'].append([auth['name'] for auth in paper['authors']])", "brainglobe') return {**brainglobe_papers, **citing_brainglobe} def print_citations(citations): ''' prints a list of citations as", "kw in KEYWORDS if kw in paper['abstract'].lower()] # add it to the list", "Wellcome Centre Blog, June 2021_ ### [Brainrender: visualising brain data in 3D](https://www.sainsburywellcome.org/web/blog/brainrender-visualising-brain-data-3d) _Sainsbury", "only keep the ones relevant for brainglobe. Then, use these publications to find", "= [], year = [], title = [], authors = [], link=[], )", "fin: content = fin.read() with open('_pages/references.md', 'w') as fout: fout.write(content.replace('\\n\\n\\n\\n', '')) if __name__", "from loguru import logger from rich import print from rich.table import Table from", "each author in the list get all publications and only keep the ones", "{author_n+1}/{len(AUTHORS)}') author = sch.author(author_id) logger.debug(f'Found {len(author[\"papers\"])} papers for {author[\"name\"]}') if not len(author.keys()): raise", "list get all publications and only keep the ones relevant for brainglobe. 
Then,", "{added}/{len(author[\"papers\"])} papers for {author[\"name\"]}') logger.info(f'Found {len(brainglobe_papers[\"id\"])} brainglobe papers') logger.info('Getting papers citing our work')", "paper in paper_citations: if paper['paperId'] in citing_brainglobe['id']: continue # avoid duplicates citing_brainglobe['id'].append(paper['paperId']) citing_brainglobe['year'].append(str(paper['year']))", "press' mdFile.write(\"\"\" # BrainGlobe reported in press/online ### [Why These Python Coders are", "authors = [], link=[], ) citing_brainglobe = dict( id = [], year =", "= [] brainglobe_papers = dict( id = [], year = [], title =", "{pink}') tb.add_column('Year', justify='right', style='dim') tb.add_column('Title', style=blue_light) tb.add_column('Authors') for n in range(len(citations['id'])): tb.add_row( citations['year'][n],", "to avoid duplicates') continue # skip duplicates logger.debug(f'Found brainglobe paper: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\"", "NOT belonging to brainglobe: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations') logger.debug(f'Added {added}/{len(author[\"papers\"])} papers", "adding_year in years: mdFile.new_header(level=2, title=adding_year) # add papers for n in range(len(citations['id'])): year", "pink, blue_light ''' Searches google scholar for papers using brainglobe's tools ''' AUTHORS", "rich import print from rich.table import Table from mdutils.mdutils import MdUtils import semanticscholar", "brain data in 3D](https://www.sainsburywellcome.org/web/blog/brainrender-visualising-brain-data-3d) _Sainsbury Wellcome Centre Blog, March 2021_ ### [Cellfinder: Harnessing", "paper_citations: if paper['paperId'] in citing_brainglobe['id']: continue # avoid duplicates citing_brainglobe['id'].append(paper['paperId']) citing_brainglobe['year'].append(str(paper['year'])) citing_brainglobe['title'].append(paper['title']) citing_brainglobe['authors'].append([auth['name']", "relevant for brainglobe. 
Then, use these publications to find papers citing them '''", "papers for paper in author['papers']: paper = sch.paper(paper['paperId']) if not paper or paper['abstract']", "style='dim') tb.add_column('Title', style=blue_light) tb.add_column('Authors') for n in range(len(citations['id'])): tb.add_row( citations['year'][n], citations['title'][n], ', '.join(citations['authors'][n]),", "of citations as a rich tble ''' tb = Table(box=None, header_style=f'bold {pink}') tb.add_column('Year',", "add papers for n in range(len(citations['id'])): year = citations['year'][n] link = citations['link'][n] if", "added += 1 else: logger.debug(f'Paper NOT belonging to brainglobe: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with", "paper['abstract'] is None: logger.debug(f' skipping paper {paper[\"title\"]} because it has not abstract') continue", "justify='right', style='dim') tb.add_column('Title', style=blue_light) tb.add_column('Authors') for n in range(len(citations['id'])): tb.add_row( citations['year'][n], citations['title'][n], ',", "of file with open('_pages/references.md', 'r') as fin: content = fin.read() with open('_pages/references.md', 'w')", "title='Papers citing BrainGlobe tools ') years = sorted(set(citations['year'])) for adding_year in years: mdFile.new_header(level=2,", "mdFile.create_md_file() # remove extra empty lines at top of file with open('_pages/references.md', 'r')", "a list of citations as a rich tble ''' tb = Table(box=None, header_style=f'bold", "duplicates logger.debug(f'Found brainglobe paper: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations') brainglobe_papers['id'].append(paper['corpusId']) brainglobe_papers['year'].append(str(paper['year'])) brainglobe_papers['authors'].append([auth['name']", "citations['title'][n], ', '.join(citations['authors'][n]), ) print(tb) def make_citations_markdown(citations): ''' Replaces ./_pages/references.md to update with", "April 2020](https://www.scientifica.uk.com/neurowire/the-best-neuroscience-stories-from-april-2020) _NeuroWire (Scientifica), April 2020_ \"\"\") # save mdFile.create_md_file() # remove extra", "\"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations') logger.debug(f'Added {added}/{len(author[\"papers\"])} papers for {author[\"name\"]}') logger.info(f'Found {len(brainglobe_papers[\"id\"])} brainglobe papers')", "= [], title = [], authors = [], link=[], ) # loop over", "in paper['abstract'].lower()] # add it to the list of brainglobe papers if matched_keywords:", "belonging to brainglobe: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations') logger.debug(f'Added {added}/{len(author[\"papers\"])} papers for", "brainglobe ''' logger.debug('Updating markdown file') # create markdown file mdFile = MdUtils(file_name='_pages/references.md') #", "Table(box=None, header_style=f'bold {pink}') tb.add_column('Year', justify='right', style='dim') tb.add_column('Title', style=blue_light) tb.add_column('Authors') for n in range(len(citations['id'])):", "citations') logger.debug(f'Added {added}/{len(author[\"papers\"])} papers for {author[\"name\"]}') logger.info(f'Found {len(brainglobe_papers[\"id\"])} brainglobe papers') logger.info('Getting papers citing", "paper: {paper[\"title\"]} to avoid duplicates') continue # skip duplicates logger.debug(f'Found brainglobe paper: \"{paper[\"title\"]}\"", "papers if matched_keywords: if paper['corpusId'] in brainglobe_papers['id']: logger.debug(f' skipping paper: {paper[\"title\"]} to avoid", "@ 
\"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations') brainglobe_papers['id'].append(paper['corpusId']) brainglobe_papers['year'].append(str(paper['year'])) brainglobe_papers['authors'].append([auth['name'] for auth in paper['authors']]) brainglobe_papers['title'].append(paper['title'])", "import logger from rich import print from rich.table import Table from mdutils.mdutils import", "citing our work') for paper_citations in citations: for paper in paper_citations: if paper['paperId']", "kw in paper['abstract'].lower()] # add it to the list of brainglobe papers if", "fetch author data, probably an API timeout error, wait a bit.') # loop", "brainglobe papers') for author_n, author_id in enumerate(AUTHORS): added = 0 logger.debug(f'Fetching for author", "for paper in author['papers']: paper = sch.paper(paper['paperId']) if not paper or paper['abstract'] is", "Then, use these publications to find papers citing them ''' citations = []", "fin.read() with open('_pages/references.md', 'w') as fout: fout.write(content.replace('\\n\\n\\n\\n', '')) if __name__ == '__main__': citations", "mdFile.new_header(level=3, title= mdFile.new_inline_link(link=link, text=citations['title'][n]) ) # add 'in the press' mdFile.write(\"\"\" # BrainGlobe", "neuroscience stories from April 2020](https://www.scientifica.uk.com/neurowire/the-best-neuroscience-stories-from-april-2020) _NeuroWire (Scientifica), April 2020_ \"\"\") # save mdFile.create_md_file()", ") print(tb) def make_citations_markdown(citations): ''' Replaces ./_pages/references.md to update with the most recent", "the list of brainglobe papers if matched_keywords: if paper['corpusId'] in brainglobe_papers['id']: logger.debug(f' skipping", "{author[\"name\"]}') if not len(author.keys()): raise ValueError('Could not fetch author data, probably an API", "are Joining the napari Community](https://cziscience.medium.com/why-these-python-coders-are-joining-the-napari-community-c0af6bb6ee3a) _Chan Zuckerberg Science Initiative (Medium), June 2021_ ###", "[], link=[], ) citing_brainglobe = dict( id = [], year = [], title", "author_profile: true title: \"References\" --- \"\"\") mdFile.new_header(level=1, title='Papers citing BrainGlobe tools ') years", "from April 2020](https://www.scientifica.uk.com/neurowire/the-best-neuroscience-stories-from-april-2020) _NeuroWire (Scientifica), April 2020_ \"\"\") # save mdFile.create_md_file() # remove", "continue # skip duplicates logger.debug(f'Found brainglobe paper: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations')", "sch.author(author_id) logger.debug(f'Found {len(author[\"papers\"])} papers for {author[\"name\"]}') if not len(author.keys()): raise ValueError('Could not fetch", "\"\"\") # save mdFile.create_md_file() # remove extra empty lines at top of file", "with open('_pages/references.md', 'w') as fout: fout.write(content.replace('\\n\\n\\n\\n', '')) if __name__ == '__main__': citations =", "in paper_citations: if paper['paperId'] in citing_brainglobe['id']: continue # avoid duplicates citing_brainglobe['id'].append(paper['paperId']) citing_brainglobe['year'].append(str(paper['year'])) citing_brainglobe['title'].append(paper['title'])", "add it to the list of brainglobe papers if matched_keywords: if paper['corpusId'] in", ") citing_brainglobe = dict( id = [], year = [], title = [],", "paper in author['papers']: paper = sch.paper(paper['paperId']) if not paper or paper['abstract'] is None:", "KEYWORDS if kw in paper['abstract'].lower()] # add it to the list of 
brainglobe", "paper['abstract'].lower()] # add it to the list of brainglobe papers if matched_keywords: if", "'brainreg') def fetch_citations(): ''' Fetches citations semantic scholar, for each author in the", "= [], link=[], ) citing_brainglobe = dict( id = [], year = [],", "cell detection in whole brain microscopy images](https://www.sainsburywellcome.org/web/blog/using-deep-learning-aid-3d-cell-detection-whole-brain-microscopy-images) _Sainsbury Wellcome Centre Blog, June 2021_", "because it has not abstract') continue matched_keywords = [kw for kw in KEYWORDS", "logger.debug(f'Paper NOT belonging to brainglobe: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations') logger.debug(f'Added {added}/{len(author[\"papers\"])}", "in citations: for paper in paper_citations: if paper['paperId'] in citing_brainglobe['id']: continue # avoid", "paper['authors']]) citing_brainglobe['link'].append(paper['url']) logger.info(f'Found {len(citing_brainglobe[\"id\"])} papers citing brainglobe') return {**brainglobe_papers, **citing_brainglobe} def print_citations(citations): '''", "= citations['link'][n] if year != adding_year: continue mdFile.new_header(level=3, title= mdFile.new_inline_link(link=link, text=citations['title'][n]) ) #", "deep learning to map the brain](https://www.sainsburywellcome.org/web/blog/cellfinder-harnessing-power-deep-learning-map-brain) _Sainsbury Wellcome Centre Blog, April 2020_ ###", "tb.add_row( citations['year'][n], citations['title'][n], ', '.join(citations['authors'][n]), ) print(tb) def make_citations_markdown(citations): ''' Replaces ./_pages/references.md to", "n in range(len(citations['id'])): year = citations['year'][n] link = citations['link'][n] if year != adding_year:", "Joining the napari Community](https://cziscience.medium.com/why-these-python-coders-are-joining-the-napari-community-c0af6bb6ee3a) _Chan Zuckerberg Science Initiative (Medium), June 2021_ ### [Using", "# <NAME> '3853277', # <NAME> '8668066', # <NAME> ) KEYWORDS = ('brainglobe', 'brainrender',", "April 2020_ \"\"\") # save mdFile.create_md_file() # remove extra empty lines at top", "microscopy images](https://www.sainsburywellcome.org/web/blog/using-deep-learning-aid-3d-cell-detection-whole-brain-microscopy-images) _Sainsbury Wellcome Centre Blog, June 2021_ ### [Brainrender: visualising brain data", "wait a bit.') # loop over papers for paper in author['papers']: paper =", "our work') for paper_citations in citations: for paper in paper_citations: if paper['paperId'] in", "_Chan Zuckerberg Science Initiative (Medium), June 2021_ ### [Using deep learning to aid", ") KEYWORDS = ('brainglobe', 'brainrender', 'cellfinder', 'brainreg') def fetch_citations(): ''' Fetches citations semantic", "in enumerate(AUTHORS): added = 0 logger.debug(f'Fetching for author {author_n+1}/{len(AUTHORS)}') author = sch.author(author_id) logger.debug(f'Found", "''' Replaces ./_pages/references.md to update with the most recent citations of papers using/citing", "brainglobe_papers['id'].append(paper['corpusId']) brainglobe_papers['year'].append(str(paper['year'])) brainglobe_papers['authors'].append([auth['name'] for auth in paper['authors']]) brainglobe_papers['title'].append(paper['title']) brainglobe_papers['link'].append(paper['url']) citations.append(paper['citations']) added += 1", "[Why These Python Coders are Joining the napari Community](https://cziscience.medium.com/why-these-python-coders-are-joining-the-napari-community-c0af6bb6ee3a) _Chan Zuckerberg Science Initiative", 
"year = [], title = [], authors = [], link=[], ) citing_brainglobe =", "logger.info(f'Found {len(brainglobe_papers[\"id\"])} brainglobe papers') logger.info('Getting papers citing our work') for paper_citations in citations:", "semanticscholar as sch from myterial import pink, blue_light ''' Searches google scholar for", "bit.') # loop over papers for paper in author['papers']: paper = sch.paper(paper['paperId']) if", "BrainGlobe reported in press/online ### [Why These Python Coders are Joining the napari", "adding_year: continue mdFile.new_header(level=3, title= mdFile.new_inline_link(link=link, text=citations['title'][n]) ) # add 'in the press' mdFile.write(\"\"\"", "continue mdFile.new_header(level=3, title= mdFile.new_inline_link(link=link, text=citations['title'][n]) ) # add 'in the press' mdFile.write(\"\"\" #", "brainglobe_papers['title'].append(paper['title']) brainglobe_papers['link'].append(paper['url']) citations.append(paper['citations']) added += 1 else: logger.debug(f'Paper NOT belonging to brainglobe: \"{paper[\"title\"]}\"", "{author[\"name\"]}') logger.info(f'Found {len(brainglobe_papers[\"id\"])} brainglobe papers') logger.info('Getting papers citing our work') for paper_citations in", "empty lines at top of file with open('_pages/references.md', 'r') as fin: content =", "# BrainGlobe reported in press/online ### [Why These Python Coders are Joining the", "citing_brainglobe['id'].append(paper['paperId']) citing_brainglobe['year'].append(str(paper['year'])) citing_brainglobe['title'].append(paper['title']) citing_brainglobe['authors'].append([auth['name'] for auth in paper['authors']]) citing_brainglobe['link'].append(paper['url']) logger.info(f'Found {len(citing_brainglobe[\"id\"])} papers citing", "add metadata & header mdFile.write(text=\"\"\" --- permalink: /references author_profile: true title: \"References\" ---", "if year != adding_year: continue mdFile.new_header(level=3, title= mdFile.new_inline_link(link=link, text=citations['title'][n]) ) # add 'in", "= sch.paper(paper['paperId']) if not paper or paper['abstract'] is None: logger.debug(f' skipping paper {paper[\"title\"]}", "citations of papers using/citing brainglobe ''' logger.debug('Updating markdown file') # create markdown file", "logger.info('Getting brainglobe papers') for author_n, author_id in enumerate(AUTHORS): added = 0 logger.debug(f'Fetching for", "data, probably an API timeout error, wait a bit.') # loop over papers", "keep the ones relevant for brainglobe. 
Then, use these publications to find papers", "3D](https://www.sainsburywellcome.org/web/blog/brainrender-visualising-brain-data-3d) _Sainsbury Wellcome Centre Blog, March 2021_ ### [Cellfinder: Harnessing the power of", "\"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations') logger.debug(f'Added {added}/{len(author[\"papers\"])} papers for {author[\"name\"]}') logger.info(f'Found {len(brainglobe_papers[\"id\"])}", "''' citations = [] brainglobe_papers = dict( id = [], year = [],", "3D cell detection in whole brain microscopy images](https://www.sainsburywellcome.org/web/blog/using-deep-learning-aid-3d-cell-detection-whole-brain-microscopy-images) _Sainsbury Wellcome Centre Blog, June", "citations as a rich tble ''' tb = Table(box=None, header_style=f'bold {pink}') tb.add_column('Year', justify='right',", "= sorted(set(citations['year'])) for adding_year in years: mdFile.new_header(level=2, title=adding_year) # add papers for n", "header mdFile.write(text=\"\"\" --- permalink: /references author_profile: true title: \"References\" --- \"\"\") mdFile.new_header(level=1, title='Papers", "it has not abstract') continue matched_keywords = [kw for kw in KEYWORDS if", "mdFile.new_header(level=1, title='Papers citing BrainGlobe tools ') years = sorted(set(citations['year'])) for adding_year in years:", "detection in whole brain microscopy images](https://www.sainsburywellcome.org/web/blog/using-deep-learning-aid-3d-cell-detection-whole-brain-microscopy-images) _Sainsbury Wellcome Centre Blog, June 2021_ ###", "Blog, March 2021_ ### [Cellfinder: Harnessing the power of deep learning to map", "stories from April 2020](https://www.scientifica.uk.com/neurowire/the-best-neuroscience-stories-from-april-2020) _NeuroWire (Scientifica), April 2020_ \"\"\") # save mdFile.create_md_file() #", "fetch_citations(): ''' Fetches citations semantic scholar, for each author in the list get", "fout: fout.write(content.replace('\\n\\n\\n\\n', '')) if __name__ == '__main__': citations = fetch_citations() # print_citations(citations) make_citations_markdown(citations)", "brainglobe papers') logger.info('Getting papers citing our work') for paper_citations in citations: for paper", "+= 1 else: logger.debug(f'Paper NOT belonging to brainglobe: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}|", "title=adding_year) # add papers for n in range(len(citations['id'])): year = citations['year'][n] link =", "for each author in the list get all publications and only keep the", "( '34308754', # <NAME> '3853277', # <NAME> '8668066', # <NAME> ) KEYWORDS =", "0 logger.debug(f'Fetching for author {author_n+1}/{len(AUTHORS)}') author = sch.author(author_id) logger.debug(f'Found {len(author[\"papers\"])} papers for {author[\"name\"]}')", "citing_brainglobe['link'].append(paper['url']) logger.info(f'Found {len(citing_brainglobe[\"id\"])} papers citing brainglobe') return {**brainglobe_papers, **citing_brainglobe} def print_citations(citations): ''' prints", "papers for {author[\"name\"]}') logger.info(f'Found {len(brainglobe_papers[\"id\"])} brainglobe papers') logger.info('Getting papers citing our work') for", "= [], link=[], ) # loop over authors logger.info('Getting brainglobe papers') for author_n,", "logger.info('Getting papers citing our work') for paper_citations in citations: for paper in paper_citations:", ") # loop over authors logger.info('Getting brainglobe papers') for author_n, author_id in enumerate(AUTHORS):", "= Table(box=None, header_style=f'bold 
{pink}') tb.add_column('Year', justify='right', style='dim') tb.add_column('Title', style=blue_light) tb.add_column('Authors') for n in", "= [], authors = [], link=[], ) citing_brainglobe = dict( id = [],", "these publications to find papers citing them ''' citations = [] brainglobe_papers =", "matched_keywords = [kw for kw in KEYWORDS if kw in paper['abstract'].lower()] # add", "\"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations') brainglobe_papers['id'].append(paper['corpusId']) brainglobe_papers['year'].append(str(paper['year'])) brainglobe_papers['authors'].append([auth['name'] for auth in paper['authors']]) brainglobe_papers['title'].append(paper['title']) brainglobe_papers['link'].append(paper['url'])", "{len(citing_brainglobe[\"id\"])} papers citing brainglobe') return {**brainglobe_papers, **citing_brainglobe} def print_citations(citations): ''' prints a list", "brain](https://www.sainsburywellcome.org/web/blog/cellfinder-harnessing-power-deep-learning-map-brain) _Sainsbury Wellcome Centre Blog, April 2020_ ### [The best neuroscience stories from", "Blog, April 2020_ ### [The best neuroscience stories from April 2020](https://www.scientifica.uk.com/neurowire/the-best-neuroscience-stories-from-april-2020) _NeuroWire (Scientifica),", "not paper or paper['abstract'] is None: logger.debug(f' skipping paper {paper[\"title\"]} because it has", "2021_ ### [Using deep learning to aid 3D cell detection in whole brain", "ones relevant for brainglobe. Then, use these publications to find papers citing them", "the napari Community](https://cziscience.medium.com/why-these-python-coders-are-joining-the-napari-community-c0af6bb6ee3a) _Chan Zuckerberg Science Initiative (Medium), June 2021_ ### [Using deep", "# avoid duplicates citing_brainglobe['id'].append(paper['paperId']) citing_brainglobe['year'].append(str(paper['year'])) citing_brainglobe['title'].append(paper['title']) citing_brainglobe['authors'].append([auth['name'] for auth in paper['authors']]) citing_brainglobe['link'].append(paper['url']) logger.info(f'Found", "import semanticscholar as sch from myterial import pink, blue_light ''' Searches google scholar", "timeout error, wait a bit.') # loop over papers for paper in author['papers']:", "1 else: logger.debug(f'Paper NOT belonging to brainglobe: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations')", "from myterial import pink, blue_light ''' Searches google scholar for papers using brainglobe's", "for kw in KEYWORDS if kw in paper['abstract'].lower()] # add it to the", "citing_brainglobe['title'].append(paper['title']) citing_brainglobe['authors'].append([auth['name'] for auth in paper['authors']]) citing_brainglobe['link'].append(paper['url']) logger.info(f'Found {len(citing_brainglobe[\"id\"])} papers citing brainglobe') return", "auth in paper['authors']]) citing_brainglobe['link'].append(paper['url']) logger.info(f'Found {len(citing_brainglobe[\"id\"])} papers citing brainglobe') return {**brainglobe_papers, **citing_brainglobe} def", "= [], authors = [], link=[], ) # loop over authors logger.info('Getting brainglobe", "save mdFile.create_md_file() # remove extra empty lines at top of file with open('_pages/references.md',", "papers for n in range(len(citations['id'])): year = citations['year'][n] link = citations['link'][n] if year", "brainglobe_papers['authors'].append([auth['name'] for auth in paper['authors']]) brainglobe_papers['title'].append(paper['title']) brainglobe_papers['link'].append(paper['url']) 
citations.append(paper['citations']) added += 1 else: logger.debug(f'Paper", "paper: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations') brainglobe_papers['id'].append(paper['corpusId']) brainglobe_papers['year'].append(str(paper['year'])) brainglobe_papers['authors'].append([auth['name'] for auth in", "brainglobe_papers['year'].append(str(paper['year'])) brainglobe_papers['authors'].append([auth['name'] for auth in paper['authors']]) brainglobe_papers['title'].append(paper['title']) brainglobe_papers['link'].append(paper['url']) citations.append(paper['citations']) added += 1 else:", "for auth in paper['authors']]) citing_brainglobe['link'].append(paper['url']) logger.info(f'Found {len(citing_brainglobe[\"id\"])} papers citing brainglobe') return {**brainglobe_papers, **citing_brainglobe}", "top of file with open('_pages/references.md', 'r') as fin: content = fin.read() with open('_pages/references.md',", "file') # create markdown file mdFile = MdUtils(file_name='_pages/references.md') # add metadata & header", "if paper['corpusId'] in brainglobe_papers['id']: logger.debug(f' skipping paper: {paper[\"title\"]} to avoid duplicates') continue #", "n in range(len(citations['id'])): tb.add_row( citations['year'][n], citations['title'][n], ', '.join(citations['authors'][n]), ) print(tb) def make_citations_markdown(citations): '''", "in range(len(citations['id'])): year = citations['year'][n] link = citations['link'][n] if year != adding_year: continue", "year != adding_year: continue mdFile.new_header(level=3, title= mdFile.new_inline_link(link=link, text=citations['title'][n]) ) # add 'in the", "citing_brainglobe['id']: continue # avoid duplicates citing_brainglobe['id'].append(paper['paperId']) citing_brainglobe['year'].append(str(paper['year'])) citing_brainglobe['title'].append(paper['title']) citing_brainglobe['authors'].append([auth['name'] for auth in paper['authors']])", "# skip duplicates logger.debug(f'Found brainglobe paper: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations') brainglobe_papers['id'].append(paper['corpusId'])", "brainglobe_papers['link'].append(paper['url']) citations.append(paper['citations']) added += 1 else: logger.debug(f'Paper NOT belonging to brainglobe: \"{paper[\"title\"]}\" @", "paper = sch.paper(paper['paperId']) if not paper or paper['abstract'] is None: logger.debug(f' skipping paper", "reported in press/online ### [Why These Python Coders are Joining the napari Community](https://cziscience.medium.com/why-these-python-coders-are-joining-the-napari-community-c0af6bb6ee3a)", "citations['year'][n] link = citations['link'][n] if year != adding_year: continue mdFile.new_header(level=3, title= mdFile.new_inline_link(link=link, text=citations['title'][n])", "papers citing them ''' citations = [] brainglobe_papers = dict( id = [],", "authors = [], link=[], ) # loop over authors logger.info('Getting brainglobe papers') for", "duplicates') continue # skip duplicates logger.debug(f'Found brainglobe paper: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}|", "and only keep the ones relevant for brainglobe. 
Then, use these publications to", "API timeout error, wait a bit.') # loop over papers for paper in", "find papers citing them ''' citations = [] brainglobe_papers = dict( id =", "citing brainglobe') return {**brainglobe_papers, **citing_brainglobe} def print_citations(citations): ''' prints a list of citations", "### [Why These Python Coders are Joining the napari Community](https://cziscience.medium.com/why-these-python-coders-are-joining-the-napari-community-c0af6bb6ee3a) _Chan Zuckerberg Science", "tb.add_column('Year', justify='right', style='dim') tb.add_column('Title', style=blue_light) tb.add_column('Authors') for n in range(len(citations['id'])): tb.add_row( citations['year'][n], citations['title'][n],", "--- \"\"\") mdFile.new_header(level=1, title='Papers citing BrainGlobe tools ') years = sorted(set(citations['year'])) for adding_year", "text=citations['title'][n]) ) # add 'in the press' mdFile.write(\"\"\" # BrainGlobe reported in press/online", "whole brain microscopy images](https://www.sainsburywellcome.org/web/blog/using-deep-learning-aid-3d-cell-detection-whole-brain-microscopy-images) _Sainsbury Wellcome Centre Blog, June 2021_ ### [Brainrender: visualising", "[], title = [], authors = [], link=[], ) citing_brainglobe = dict( id", "mdutils.mdutils import MdUtils import semanticscholar as sch from myterial import pink, blue_light '''", "to brainglobe: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with |{paper[\"numCitedBy\"]}| citations') logger.debug(f'Added {added}/{len(author[\"papers\"])} papers for {author[\"name\"]}')", "& header mdFile.write(text=\"\"\" --- permalink: /references author_profile: true title: \"References\" --- \"\"\") mdFile.new_header(level=1,", "avoid duplicates') continue # skip duplicates logger.debug(f'Found brainglobe paper: \"{paper[\"title\"]}\" @ \"{paper[\"venue\"]}\" with", "publications to find papers citing them ''' citations = [] brainglobe_papers = dict(", "# add metadata & header mdFile.write(text=\"\"\" --- permalink: /references author_profile: true title: \"References\"", "= [kw for kw in KEYWORDS if kw in paper['abstract'].lower()] # add it", "auth in paper['authors']]) brainglobe_papers['title'].append(paper['title']) brainglobe_papers['link'].append(paper['url']) citations.append(paper['citations']) added += 1 else: logger.debug(f'Paper NOT belonging", "author['papers']: paper = sch.paper(paper['paperId']) if not paper or paper['abstract'] is None: logger.debug(f' skipping", "{len(brainglobe_papers[\"id\"])} brainglobe papers') logger.info('Getting papers citing our work') for paper_citations in citations: for", "rich.table import Table from mdutils.mdutils import MdUtils import semanticscholar as sch from myterial", "or paper['abstract'] is None: logger.debug(f' skipping paper {paper[\"title\"]} because it has not abstract')", "as fout: fout.write(content.replace('\\n\\n\\n\\n', '')) if __name__ == '__main__': citations = fetch_citations() # print_citations(citations)", "Initiative (Medium), June 2021_ ### [Using deep learning to aid 3D cell detection", "best neuroscience stories from April 2020](https://www.scientifica.uk.com/neurowire/the-best-neuroscience-stories-from-april-2020) _NeuroWire (Scientifica), April 2020_ \"\"\") # save", "with |{paper[\"numCitedBy\"]}| citations') logger.debug(f'Added {added}/{len(author[\"papers\"])} papers for {author[\"name\"]}') logger.info(f'Found {len(brainglobe_papers[\"id\"])} brainglobe papers') logger.info('Getting", "for {author[\"name\"]}') if not len(author.keys()): raise 
ValueError('Could not fetch author data, probably an", "logger.debug(f' skipping paper: {paper[\"title\"]} to avoid duplicates') continue # skip duplicates logger.debug(f'Found brainglobe", "citations') brainglobe_papers['id'].append(paper['corpusId']) brainglobe_papers['year'].append(str(paper['year'])) brainglobe_papers['authors'].append([auth['name'] for auth in paper['authors']]) brainglobe_papers['title'].append(paper['title']) brainglobe_papers['link'].append(paper['url']) citations.append(paper['citations']) added +=", "content = fin.read() with open('_pages/references.md', 'w') as fout: fout.write(content.replace('\\n\\n\\n\\n', '')) if __name__ ==", "'cellfinder', 'brainreg') def fetch_citations(): ''' Fetches citations semantic scholar, for each author in", "learning to map the brain](https://www.sainsburywellcome.org/web/blog/cellfinder-harnessing-power-deep-learning-map-brain) _Sainsbury Wellcome Centre Blog, April 2020_ ### [The", "paper['authors']]) brainglobe_papers['title'].append(paper['title']) brainglobe_papers['link'].append(paper['url']) citations.append(paper['citations']) added += 1 else: logger.debug(f'Paper NOT belonging to brainglobe:", "Centre Blog, April 2020_ ### [The best neuroscience stories from April 2020](https://www.scientifica.uk.com/neurowire/the-best-neuroscience-stories-from-april-2020) _NeuroWire", "the power of deep learning to map the brain](https://www.sainsburywellcome.org/web/blog/cellfinder-harnessing-power-deep-learning-map-brain) _Sainsbury Wellcome Centre Blog,", "citations: for paper in paper_citations: if paper['paperId'] in citing_brainglobe['id']: continue # avoid duplicates", "brainglobe_papers['id']: logger.debug(f' skipping paper: {paper[\"title\"]} to avoid duplicates') continue # skip duplicates logger.debug(f'Found", "to the list of brainglobe papers if matched_keywords: if paper['corpusId'] in brainglobe_papers['id']: logger.debug(f'", "[], year = [], title = [], authors = [], link=[], ) #", "print from rich.table import Table from mdutils.mdutils import MdUtils import semanticscholar as sch", "print_citations(citations): ''' prints a list of citations as a rich tble ''' tb", "Wellcome Centre Blog, April 2020_ ### [The best neuroscience stories from April 2020](https://www.scientifica.uk.com/neurowire/the-best-neuroscience-stories-from-april-2020)", "tools ''' AUTHORS = ( '34308754', # <NAME> '3853277', # <NAME> '8668066', #", "at top of file with open('_pages/references.md', 'r') as fin: content = fin.read() with", "def fetch_citations(): ''' Fetches citations semantic scholar, for each author in the list", "the list get all publications and only keep the ones relevant for brainglobe.", "'r') as fin: content = fin.read() with open('_pages/references.md', 'w') as fout: fout.write(content.replace('\\n\\n\\n\\n', ''))", "if not len(author.keys()): raise ValueError('Could not fetch author data, probably an API timeout", "logger.debug(f'Fetching for author {author_n+1}/{len(AUTHORS)}') author = sch.author(author_id) logger.debug(f'Found {len(author[\"papers\"])} papers for {author[\"name\"]}') if", "# add 'in the press' mdFile.write(\"\"\" # BrainGlobe reported in press/online ### [Why", "in the list get all publications and only keep the ones relevant for", "# <NAME> ) KEYWORDS = ('brainglobe', 'brainrender', 'cellfinder', 'brainreg') def fetch_citations(): ''' Fetches", "# remove extra empty lines at top of file with open('_pages/references.md', 'r') as", "citations semantic scholar, for each author in the list get all publications 
and", "of brainglobe papers if matched_keywords: if paper['corpusId'] in brainglobe_papers['id']: logger.debug(f' skipping paper: {paper[\"title\"]}", "remove extra empty lines at top of file with open('_pages/references.md', 'r') as fin:", "in range(len(citations['id'])): tb.add_row( citations['year'][n], citations['title'][n], ', '.join(citations['authors'][n]), ) print(tb) def make_citations_markdown(citations): ''' Replaces", "''' Searches google scholar for papers using brainglobe's tools ''' AUTHORS = (", "papers using/citing brainglobe ''' logger.debug('Updating markdown file') # create markdown file mdFile =", "'brainrender', 'cellfinder', 'brainreg') def fetch_citations(): ''' Fetches citations semantic scholar, for each author", "citations['year'][n], citations['title'][n], ', '.join(citations['authors'][n]), ) print(tb) def make_citations_markdown(citations): ''' Replaces ./_pages/references.md to update", "<NAME> '8668066', # <NAME> ) KEYWORDS = ('brainglobe', 'brainrender', 'cellfinder', 'brainreg') def fetch_citations():", "header_style=f'bold {pink}') tb.add_column('Year', justify='right', style='dim') tb.add_column('Title', style=blue_light) tb.add_column('Authors') for n in range(len(citations['id'])): tb.add_row(", "deep learning to aid 3D cell detection in whole brain microscopy images](https://www.sainsburywellcome.org/web/blog/using-deep-learning-aid-3d-cell-detection-whole-brain-microscopy-images) _Sainsbury", "# loop over papers for paper in author['papers']: paper = sch.paper(paper['paperId']) if not", "KEYWORDS = ('brainglobe', 'brainrender', 'cellfinder', 'brainreg') def fetch_citations(): ''' Fetches citations semantic scholar,", "[], link=[], ) # loop over authors logger.info('Getting brainglobe papers') for author_n, author_id", "as fin: content = fin.read() with open('_pages/references.md', 'w') as fout: fout.write(content.replace('\\n\\n\\n\\n', '')) if", "from mdutils.mdutils import MdUtils import semanticscholar as sch from myterial import pink, blue_light", "citations = [] brainglobe_papers = dict( id = [], year = [], title", "[Cellfinder: Harnessing the power of deep learning to map the brain](https://www.sainsburywellcome.org/web/blog/cellfinder-harnessing-power-deep-learning-map-brain) _Sainsbury Wellcome", "import MdUtils import semanticscholar as sch from myterial import pink, blue_light ''' Searches", "images](https://www.sainsburywellcome.org/web/blog/using-deep-learning-aid-3d-cell-detection-whole-brain-microscopy-images) _Sainsbury Wellcome Centre Blog, June 2021_ ### [Brainrender: visualising brain data in", "use these publications to find papers citing them ''' citations = [] brainglobe_papers", "papers for {author[\"name\"]}') if not len(author.keys()): raise ValueError('Could not fetch author data, probably", "list of citations as a rich tble ''' tb = Table(box=None, header_style=f'bold {pink}')", "with |{paper[\"numCitedBy\"]}| citations') brainglobe_papers['id'].append(paper['corpusId']) brainglobe_papers['year'].append(str(paper['year'])) brainglobe_papers['authors'].append([auth['name'] for auth in paper['authors']]) brainglobe_papers['title'].append(paper['title']) brainglobe_papers['link'].append(paper['url']) citations.append(paper['citations'])", "id = [], year = [], title = [], authors = [], link=[],", "markdown file mdFile = MdUtils(file_name='_pages/references.md') # add metadata & header mdFile.write(text=\"\"\" --- permalink:", "myterial import pink, blue_light ''' Searches google scholar for papers using brainglobe's tools", "''' 
AUTHORS = (
    '34308754',  # <NAME>
    '3853277',   # <NAME>
    '8668066',   # <NAME>
)

KEYWORDS = ('brainglobe', 'brainrender', 'cellfinder', 'brainreg')


def fetch_citations():
    '''
        Fetches citations from Semantic Scholar: for each author in the list, get all
        publications and keep only the ones relevant for brainglobe.
        Then, use these publications to find papers citing them.
    '''
    citations = []
    brainglobe_papers = dict(id=[], year=[], title=[], authors=[], link=[])
    citing_brainglobe = dict(id=[], year=[], title=[], authors=[], link=[])

    # loop over authors
    logger.info('Getting brainglobe papers')
    for author_n, author_id in enumerate(AUTHORS):
        added = 0
        logger.debug(f'Fetching for author {author_n+1}/{len(AUTHORS)}')
        author = sch.author(author_id)
        logger.debug(f'Found {len(author["papers"])} papers for {author["name"]}')
        if not len(author.keys()):
            raise ValueError('Could not fetch author data, probably an API timeout error, wait a bit.')

        # loop over papers
        for paper in author['papers']:
            paper = sch.paper(paper['paperId'])
            if not paper or paper['abstract'] is None:
                logger.debug(f'  skipping paper {paper["title"]} because it has no abstract')
                continue
            matched_keywords = [kw for kw in KEYWORDS if kw in paper['abstract'].lower()]

            # add it to the list of brainglobe papers
            if matched_keywords:
                if paper['corpusId'] in brainglobe_papers['id']:
                    logger.debug(f'  skipping paper: {paper["title"]} to avoid duplicates')
                    continue  # skip duplicates
                logger.debug(f'Found brainglobe paper: "{paper["title"]}" @ "{paper["venue"]}" with |{paper["numCitedBy"]}| citations')

                brainglobe_papers['id'].append(paper['corpusId'])
                brainglobe_papers['year'].append(str(paper['year']))
                brainglobe_papers['authors'].append([auth['name'] for auth in paper['authors']])
                brainglobe_papers['title'].append(paper['title'])
                brainglobe_papers['link'].append(paper['url'])

                citations.append(paper['citations'])
                added += 1
            else:
                logger.debug(f'Paper NOT belonging to brainglobe: "{paper["title"]}" @ "{paper["venue"]}" with |{paper["numCitedBy"]}| citations')
        logger.debug(f'Added {added}/{len(author["papers"])} papers for {author["name"]}')
    logger.info(f'Found {len(brainglobe_papers["id"])} brainglobe papers')

    logger.info('Getting papers citing our work')
    for paper_citations in citations:
        for paper in paper_citations:
            if paper['paperId'] in citing_brainglobe['id']:
                continue  # avoid duplicates
            citing_brainglobe['id'].append(paper['paperId'])
            citing_brainglobe['year'].append(str(paper['year']))
            citing_brainglobe['title'].append(paper['title'])
            citing_brainglobe['authors'].append([auth['name'] for auth in paper['authors']])
            citing_brainglobe['link'].append(paper['url'])
    logger.info(f'Found {len(citing_brainglobe["id"])} papers citing brainglobe')

    return {**brainglobe_papers, **citing_brainglobe}
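# Illustrative note (not part of the original script): fetch_citations() returns a mapping of
# parallel lists, so entry i of each list describes one paper, e.g.
#   result = fetch_citations()
#   # result keys: 'id', 'year', 'title', 'authors', 'link'
#   # result['title'][0], result['year'][0], result['authors'][0] all refer to the same paper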
def print_citations(citations):
    '''
        prints a list of citations as a rich table
    '''
    tb = Table(box=None, header_style=f'bold {pink}')
    tb.add_column('Year', justify='right', style='dim')
    tb.add_column('Title', style=blue_light)
    tb.add_column('Authors')

    for n in range(len(citations['id'])):
        tb.add_row(
            citations['year'][n],
            citations['title'][n],
            ', '.join(citations['authors'][n]),
        )
    print(tb)
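# Quick sanity-check sketch (hypothetical data, shown only to illustrate the expected input shape):
#   demo = dict(id=['x'], year=['2021'], title=['A paper'], authors=[['A. Author']], link=['https://example.org'])
#   print_citations(demo)   # renders a one-row table in the terminal via rich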
def make_citations_markdown(citations):
    '''
        Replaces ./_pages/references.md to update with the most recent
        citations of papers using/citing brainglobe
    '''
    logger.debug('Updating markdown file')
    # create markdown file
    mdFile = MdUtils(file_name='_pages/references.md')

    # add metadata & header
    mdFile.write(text="""
---
permalink: /references
author_profile: true
title: "References"
---

""")
    mdFile.new_header(level=1, title='Papers citing BrainGlobe tools ')

    years = sorted(set(citations['year']))
    for adding_year in years:
        mdFile.new_header(level=2, title=adding_year)

        # add papers
        for n in range(len(citations['id'])):
            year = citations['year'][n]
            link = citations['link'][n]
            if year != adding_year:
                continue
            mdFile.new_header(level=3, title=mdFile.new_inline_link(link=link, text=citations['title'][n]))

    # add 'in the press'
    mdFile.write("""
# BrainGlobe reported in press/online

### [Why These Python Coders are Joining the napari Community](https://cziscience.medium.com/why-these-python-coders-are-joining-the-napari-community-c0af6bb6ee3a)
_Chan Zuckerberg Science Initiative (Medium), June 2021_

### [Using deep learning to aid 3D cell detection in whole brain microscopy images](https://www.sainsburywellcome.org/web/blog/using-deep-learning-aid-3d-cell-detection-whole-brain-microscopy-images)
_Sainsbury Wellcome Centre Blog, June 2021_

### [Brainrender: visualising brain data in 3D](https://www.sainsburywellcome.org/web/blog/brainrender-visualising-brain-data-3d)
_Sainsbury Wellcome Centre Blog, March 2021_

### [Cellfinder: Harnessing the power of deep learning to map the brain](https://www.sainsburywellcome.org/web/blog/cellfinder-harnessing-power-deep-learning-map-brain)
_Sainsbury Wellcome Centre Blog, April 2020_

### [The best neuroscience stories from April 2020](https://www.scientifica.uk.com/neurowire/the-best-neuroscience-stories-from-april-2020)
_NeuroWire (Scientifica), April 2020_
""")

    # save
    mdFile.create_md_file()

    # remove extra empty lines at top of file
    with open('_pages/references.md', 'r') as fin:
        content = fin.read()
    with open('_pages/references.md', 'w') as fout:
        fout.write(content.replace('\n\n\n\n', ''))


if __name__ == '__main__':
    citations = fetch_citations()
    print_citations(citations)
    make_citations_markdown(citations)
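# Usage sketch (illustrative; the module path is assumed from the file header above, not guaranteed):
#   python scripts/scraper.py
# regenerates _pages/references.md, or the pieces can be driven from another module:
#   from scraper import fetch_citations, print_citations, make_citations_markdown
#   citations = fetch_citations()
#   print_citations(citations)
#   make_citations_markdown(citations)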
[ "auth_response.content.decode(\"utf-8\"))[\"Document\"] auth_token = { \"token\": bearer_token, \"timestamp\": datetime.datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\") } with open(token_file_path, \"w\")", "response[0] == 200: with open(os.path.join(os.path.dirname(__file__), \"data\", \"data_{}{}.json\".format(mon, year)), \"w\") as f: json.dump(json.loads(response[1]), f,", "auth_token = { \"token\": bearer_token, \"timestamp\": datetime.datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\") } with open(token_file_path, \"w\") as", "= os.path.join( os.path.dirname(__file__), \"config\", \"api-def.json\") with open(api_def_file_path, \"r\") as f: api_def = json.load(f)", "Code: {}, Message: {}\".format( auth_response.status_code, auth_response.content.decode(\"utf-8\"))) raise Exception(\"Problem with auth\") def get_global_daily_report(month: int,", "dictionary for all the month in the 3 letter format. as this is", "\"apr\", 5: \"may\", 6: \"jun\", 7: \"jul\", 8: \"aug\", 9: \"sep\", 10: \"oct\",", "we authenticate with the server. when we send it to the server with", "way. global api_def # read this API definition from the file in the", "auth_response.content.decode(\"utf-8\"))) raise Exception(\"Problem with auth\") def get_global_daily_report(month: int, year: int): # get today's", "else year response = get_global_daily_report(mon, year) if response[0] == 200: with open(os.path.join(os.path.dirname(__file__), \"data\",", "folder, and then store it for later use. api_def_file_path = os.path.join( os.path.dirname(__file__), \"config\",", "don't need to regenerate it. We can just store the token # and", "global bearer_token # the token that the server sends us has a lifetime", "as token_file: # is the time difference between now and the date time", "take a look at REST response codes - https://developer.mozilla.org/en-US/docs/Web/HTTP/Status if auth_response.status_code == 200:", "and month > today.month): raise Exception( \"Invalid date range! No valid data prior", "open(token_file_path) as token_file: # is the time difference between now and the date", "change to the API we don't have to touch the code. We can", "letter format. as this is the only # sure way of getting it", "token_details[\"timestamp\"], \"%m/%d/%Y, %H:%M:%S\") if (datetime.datetime.now() - token_load_dt_tm).seconds < (50 * 3600): return #", "can refresh it. See a simple example below token_file_path = os.path.join( os.path.dirname(__file__), \"config\",", "regenerate it. We can just store the token # and load it the", "if auth_response.status_code == 200: bearer_token = json.loads( auth_response.content.decode(\"utf-8\"))[\"Document\"] auth_token = { \"token\": bearer_token,", "is the time difference between now and the date time the token was", "server sends us has a lifetime of ~55 hours. Hence, we don't need", "# is the time difference between now and the date time the token", "auth\") def get_global_daily_report(month: int, year: int): # get today's date information today =", "response = get_global_daily_report(mon, year) if response[0] == 200: with open(os.path.join(os.path.dirname(__file__), \"data\", \"data_{}{}.json\".format(mon, year)),", "is valid or not. If it is not, then you can refresh it.", "Jan 2020 and current month and year\") # connect to the server to", "\"jan\", 2: \"feb\", 3: \"mar\", 4: \"apr\", 5: \"may\", 6: \"jun\", 7: \"jul\",", "we want to use requires us to authenticate with some form of username", "the same way. 
# --- Separate script: COVID-19 global daily report fetcher (see https://www.covid19api.dev/#intro) ---
import requests
from requests.compat import urljoin
import json
import os
import datetime

# https://www.covid19api.dev/#intro

# creating a static dictionary for all the months in the 3-letter format, as this is the only
# sure way of getting it correct without having to do a lot of date parsing.
months = {
    1: "jan", 2: "feb", 3: "mar", 4: "apr", 5: "may", 6: "jun",
    7: "jul", 8: "aug", 9: "sep", 10: "oct", 11: "nov", 12: "dec"
}

# create a global variable for the bearer token.
# what is a bearer token? in simple words, it is a token we get when we authenticate with the
# server. when we send it to the server with each request, the server knows who is making the call.
bearer_token = ""
api_def = None


def read_api_def():
    # we store this API definition in a file just so that whenever there is a change to the API
    # we don't have to touch the code. We can just change the API endpoint, and the code should
    # work the same way.
    global api_def
    # read this API definition from the file in the config folder, and then store it for later use.
    api_def_file_path = os.path.join(
        os.path.dirname(__file__), "config", "api-def.json")
    with open(api_def_file_path, "r") as f:
        api_def = json.load(f)
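# Illustrative config sketch (an assumption, not shipped with the code): read_api_def() only
# requires that config/api-def.json expose the keys used above and in the calls below, i.e.
# root_url, username, password, api_defs.gen_token and api_defs.global_daily_reports.
# A minimal file could therefore look roughly like:
#   {
#       "root_url": "<api root url>",
#       "username": "<your username>",
#       "password": "<your password>",
#       "api_defs": {
#           "gen_token": "<token endpoint path>",
#           "global_daily_reports": "<reports endpoint path with {mon} and {yyyy} placeholders>"
#       }
#   }
# The endpoint paths are placeholders; the real ones are documented at https://www.covid19api.dev/.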
def generate_token(force=False):
    # The covid 19 tracking API we want to use requires us to authenticate with some form of
    # username and password. To this request, the API returns a bearer token, which in simpler
    # terms is a way for it to know who is making a request and if that person can use that
    # endpoint. It also helps keep a track of the number of requests a user has made and also
    # manage telemetry.
    global bearer_token
    # the token that the server sends us has a lifetime of ~55 hours. Hence, we don't need to
    # regenerate it. We can just store the token and load it the next time we bring up our
    # script. However, it is to be noted that once 55 hours are up, we need to regenerate the
    # token. You can write some code trivially by storing the date and time the token was
    # generated on in the json file itself, and then using it with the code below to check if
    # the token present is valid or not. If it is not, then you can refresh it.
    # See a simple example below.
    token_file_path = os.path.join(
        os.path.dirname(__file__), "config", "token.json")
    # check if we have a valid token already in the file
    if force == False and os.path.exists(token_file_path):
        with open(token_file_path) as token_file:
            # is the time difference between now and the date time the token was fetched
            # > 50 hours? if no, then continue using this token
            token_details = json.load(token_file)
            token_load_dt_tm = datetime.datetime.strptime(
                token_details["timestamp"], "%m/%d/%Y, %H:%M:%S")
            if (datetime.datetime.now() - token_load_dt_tm).total_seconds() < (50 * 3600):
                # reuse the cached token so later requests carry a valid Authorization header
                bearer_token = token_details["token"]
                return
    # okay we either need to fetch a token from scratch or need a new one since the old one expired
    auth_params = {
        "username": api_def["username"],
        "password": api_def["password"]
    }
    auth_response = requests.post(
        url=urljoin(api_def["root_url"], api_def["api_defs"]["gen_token"]), data=auth_params)
    # please take a look at REST response codes - https://developer.mozilla.org/en-US/docs/Web/HTTP/Status
    if auth_response.status_code == 200:
        bearer_token = json.loads(
            auth_response.content.decode("utf-8"))["Document"]
        auth_token = {
            "token": bearer_token,
            "timestamp": datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
        }
        with open(token_file_path, "w") as f:
            json.dump(auth_token, f, indent=4)
    else:
        print("A problem occurred. Code: {}, Message: {}".format(
            auth_response.status_code, auth_response.content.decode("utf-8")))
        raise Exception("Problem with auth")
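# For reference (illustrative values): the token cache written above, config/token.json, ends up
# with exactly these two keys:
#   {
#       "token": "<bearer token string returned by the API>",
#       "timestamp": "06/15/2021, 10:30:00"
#   }
# Deleting the file, or calling generate_token(force=True), forces a fresh token to be fetched.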
def get_global_daily_report(month: int, year: int):
    # get today's date information
    today = datetime.datetime.today()
    # check if the month is valid
    if month not in months.keys():
        raise Exception(
            "Invalid month range! please choose a month range between 1-12")
    # check if the date range supplied actually makes sense. Covid data is tabulated from
    # Jan 2020 till today.
    if year < 2020 or year > today.year or (year == today.year and month > today.month):
        raise Exception(
            "Invalid date range! No valid data prior to Jan 2020 or in the future. "
            "Please choose a month and year between and including, Jan 2020 and current month and year")
    # connect to the server to get the data. we also need to send the bearer token in the
    # Authorization header so the API knows who is making the request.
    api_req_param = api_def["api_defs"]["global_daily_reports"].format(
        mon=str(months[month]), yyyy=year)
    auth_token = {
        "Authorization": "Bearer {0}".format(bearer_token)
    }
    stats_response = requests.get(
        url=urljoin(api_def["root_url"], api_req_param), headers=auth_token)
    return (
        stats_response.status_code,
        stats_response.content.decode("utf-8")
    )


def init():
    read_api_def()
    generate_token()


def main(mon=0, year=0):
    init()
    mon = datetime.datetime.today().month if mon == 0 else mon
    year = datetime.datetime.today().year if year == 0 else year
    response = get_global_daily_report(mon, year)
    if response[0] == 200:
        with open(os.path.join(os.path.dirname(__file__), "data", "data_{}{}.json".format(mon, year)), "w") as f:
            json.dump(json.loads(response[1]), f, indent=4)


if __name__ == "__main__":
    main()
We can", "and including, Jan 2020 and current month and year\") # connect to the", "== False and os.path.exists(token_file_path): with open(token_file_path) as token_file: # is the time difference", "it correct without having to do a lot of date parsing. months =", "terms is a way for it to know who is making a request", "some form of username and password. To this request, # the API returns", "import json import os import datetime # https://www.covid19api.dev/#intro # creating a static dictionary", "{0}\".format(bearer_token) } stats_response = requests.get( url=urljoin(api_def[\"root_url\"], api_req_param), headers=auth_token) return ( stats_response.status_code, stats_response.content.decode(\"utf-8\") )", "global api_def # read this API definition from the file in the config", "lot of date parsing. months = { 1: \"jan\", 2: \"feb\", 3: \"mar\",", "month range! please choose a month range between 1-12\") # check if the", "not in months.keys(): raise Exception( \"Invalid month range! please choose a month range", "\"config\", \"token.json\") # check if we have a valid token already in the", "json.load(token_file) token_load_dt_tm = datetime.datetime.strptime( token_details[\"timestamp\"], \"%m/%d/%Y, %H:%M:%S\") if (datetime.datetime.now() - token_load_dt_tm).seconds < (50", "import requests from requests.compat import urljoin import json import os import datetime #", "server with the bearer_token = \"\" api_def = None def read_api_def(): # we", "we store this API definition in a file just so that whenever there", "API endpoint, and the code should work the same way. global api_def #", "write some code trivially by storing the date and time the token was", "to regenerate the token. # you can write some code trivially by storing", "a look at REST response codes - https://developer.mozilla.org/en-US/docs/Web/HTTP/Status if auth_response.status_code == 200: bearer_token", "between 1-12\") # check if the date range supplied actually makes sense. Covid", "continue using this token token_details = json.load(token_file) token_load_dt_tm = datetime.datetime.strptime( token_details[\"timestamp\"], \"%m/%d/%Y, %H:%M:%S\")", "month in the 3 letter format. as this is the only # sure", "up our script. However, it is to be noted that once 55 hours", "\"jun\", 7: \"jul\", 8: \"aug\", 9: \"sep\", 10: \"oct\", 11: \"nov\", 12: \"dec\"", "< 2020 or year > today.year or (year == today.year and month >", "the bearer_token = \"\" api_def = None def read_api_def(): # we store this", "> 50 hours? if no, then continue using this token token_details = json.load(token_file)", "storing the date and time the token was generated on in the json", "is valid if month not in months.keys(): raise Exception( \"Invalid month range! please", "a user has made and also manage telemetry. global bearer_token # the token", "it with the code below # to check if the token present is", "create a global variable for the bearer token. # what is a bearer", "that whenever there is a change to the API we don't have to", "return ( stats_response.status_code, stats_response.content.decode(\"utf-8\") ) def init(): read_api_def() generate_token() def main(mon=0, year=0): init()", "auth_response = requests.post( url=urljoin(api_def[\"root_url\"], api_def[\"api_defs\"][\"gen_token\"]), data=auth_params) # please take a look at REST", "when we authenticate with the server. 
when we send it to the server", "if we have a valid token already in the file if force ==", "requests from requests.compat import urljoin import json import os import datetime # https://www.covid19api.dev/#intro", "present is valid or not. If it is not, then you can refresh", "for the bearer token. # what is a bearer token? in simple words,", "a way for it to know who is making a request and if", "it is a token we get when we authenticate with the server. when", "token_details = json.load(token_file) token_load_dt_tm = datetime.datetime.strptime( token_details[\"timestamp\"], \"%m/%d/%Y, %H:%M:%S\") if (datetime.datetime.now() - token_load_dt_tm).seconds", "know who is making a request and if that person can # use", "we either need to fetch a token from scratch or need a new", "to fetch a token from scratch or need a new one since the", "mon year = datetime.datetime.today().year if year == 0 else year response = get_global_daily_report(mon,", "this API definition in a file just so that whenever there is a", "# we store this API definition in a file just so that whenever", "= datetime.datetime.today().year if year == 0 else year response = get_global_daily_report(mon, year) if", "API definition from the file in the config folder, and then store it", "just store the token # and load it the next time we bring", "= None def read_api_def(): # we store this API definition in a file", "the json file itself, and then using it with the code below #", "as f: json.dump(auth_token, f, indent=4) else: print(\"A problem occurred. Code: {}, Message: {}\".format(", "\"%m/%d/%Y, %H:%M:%S\") if (datetime.datetime.now() - token_load_dt_tm).seconds < (50 * 3600): return # okay", "variable for the bearer token. # what is a bearer token? in simple", "token was fetched > 50 hours? if no, then continue using this token", "read this API definition from the file in the config folder, and then", "need to fetch a token from scratch or need a new one since", "force == False and os.path.exists(token_file_path): with open(token_file_path) as token_file: # is the time", "either need to fetch a token from scratch or need a new one", "we get when we authenticate with the server. when we send it to", "\"Invalid date range! No valid data prior to Jan 2020 or in the", "sure way of getting it correct without having to do a lot of", "the token was fetched > 50 hours? if no, then continue using this", "up, we need to regenerate the token. # you can write some code", "we have a valid token already in the file if force == False", "mon=str(months[month]), yyyy=year) auth_token = { \"Authorization\": \"Bearer {0}\".format(bearer_token) } stats_response = requests.get( url=urljoin(api_def[\"root_url\"],", "code below # to check if the token present is valid or not.", "os.path.join( os.path.dirname(__file__), \"config\", \"token.json\") # check if we have a valid token already", "actually makes sense. Covid data is tabulated from Jan 2020 till today. if", "== 200: with open(os.path.join(os.path.dirname(__file__), \"data\", \"data_{}{}.json\".format(mon, year)), \"w\") as f: json.dump(json.loads(response[1]), f, indent=4)", "need to regenerate it. We can just store the token # and load", "= api_def[\"api_defs\"][\"global_daily_reports\"].format( mon=str(months[month]), yyyy=year) auth_token = { \"Authorization\": \"Bearer {0}\".format(bearer_token) } stats_response =", "0 else year response = get_global_daily_report(mon, year) if response[0] == 200: with open(os.path.join(os.path.dirname(__file__),", "of date parsing. 
months = { 1: \"jan\", 2: \"feb\", 3: \"mar\", 4:", "7: \"jul\", 8: \"aug\", 9: \"sep\", 10: \"oct\", 11: \"nov\", 12: \"dec\" }", "tabulated from Jan 2020 till today. if year < 2020 or year >", "from the file in the config folder, and then store it for later", "use that endpoint. It also helps keep a track of the number of", "with the server. when we send it to the server with the bearer_token", "the server. when we send it to the server with the bearer_token =", "example below token_file_path = os.path.join( os.path.dirname(__file__), \"config\", \"token.json\") # check if we have", "{}, Message: {}\".format( auth_response.status_code, auth_response.content.decode(\"utf-8\"))) raise Exception(\"Problem with auth\") def get_global_daily_report(month: int, year:", "also helps keep a track of the number of requests a user has", "12: \"dec\" } # create a global variable for the bearer token. #", "range! please choose a month range between 1-12\") # check if the date", "today.month): raise Exception( \"Invalid date range! No valid data prior to Jan 2020", "valid token already in the file if force == False and os.path.exists(token_file_path): with", "} with open(token_file_path, \"w\") as f: json.dump(auth_token, f, indent=4) else: print(\"A problem occurred.", "number of requests a user has made and also manage telemetry. global bearer_token", "now and the date time the token was fetched > 50 hours? if", "token was generated on in the json file itself, and then using it", "the time difference between now and the date time the token was fetched", "is a bearer token? in simple words, it is a token we get", "keep a track of the number of requests a user has made and", "at REST response codes - https://developer.mozilla.org/en-US/docs/Web/HTTP/Status if auth_response.status_code == 200: bearer_token = json.loads(", "that person can # use that endpoint. It also helps keep a track", "valid if month not in months.keys(): raise Exception( \"Invalid month range! please choose", "f: api_def = json.load(f) def generate_token(force=False): # The covid 19 tracking API we", "datetime.datetime.today().year if year == 0 else year response = get_global_daily_report(mon, year) if response[0]", "touch the code. We can just # change the API endpoint, and the", "not, then you can refresh it. See a simple example below token_file_path =", "See a simple example below token_file_path = os.path.join( os.path.dirname(__file__), \"config\", \"token.json\") # check", "in months.keys(): raise Exception( \"Invalid month range! please choose a month range between", "token. # what is a bearer token? in simple words, it is a", "can # use that endpoint. It also helps keep a track of the", "can just # change the API endpoint, and the code should work the", "# you can write some code trivially by storing the date and time", "month range between 1-12\") # check if the date range supplied actually makes", "correct without having to do a lot of date parsing. months = {", "the server with the bearer_token = \"\" api_def = None def read_api_def(): #", "in a file just so that whenever there is a change to the", "# get today's date information today = datetime.datetime.today() # check if the month", "use. 
api_def_file_path = os.path.join( os.path.dirname(__file__), \"config\", \"api-def.json\") with open(api_def_file_path, \"r\") as f: api_def", "from scratch or need a new one since the old one expired auth_params", "= { \"username\": api_def[\"username\"], \"password\": api_def[\"password\"] } auth_response = requests.post( url=urljoin(api_def[\"root_url\"], api_def[\"api_defs\"][\"gen_token\"]), data=auth_params)", "if the month is valid if month not in months.keys(): raise Exception( \"Invalid", "the month is valid if month not in months.keys(): raise Exception( \"Invalid month", "# check if the date range supplied actually makes sense. Covid data is", "of getting it correct without having to do a lot of date parsing.", "If it is not, then you can refresh it. See a simple example", "to be noted that once 55 hours are up, we need to regenerate", "- https://developer.mozilla.org/en-US/docs/Web/HTTP/Status if auth_response.status_code == 200: bearer_token = json.loads( auth_response.content.decode(\"utf-8\"))[\"Document\"] auth_token = {", "with open(token_file_path, \"w\") as f: json.dump(auth_token, f, indent=4) else: print(\"A problem occurred. Code:", "api_def[\"username\"], \"password\": api_def[\"password\"] } auth_response = requests.post( url=urljoin(api_def[\"root_url\"], api_def[\"api_defs\"][\"gen_token\"]), data=auth_params) # please take", "> today.year or (year == today.year and month > today.month): raise Exception( \"Invalid", "connect to the server to get the data. we also need to api_req_param", "The covid 19 tracking API we want to use requires us to authenticate", "(year == today.year and month > today.month): raise Exception( \"Invalid date range! No", "- token_load_dt_tm).seconds < (50 * 3600): return # okay we either need to", "code trivially by storing the date and time the token was generated on", "it is to be noted that once 55 hours are up, we need", ") def init(): read_api_def() generate_token() def main(mon=0, year=0): init() mon = datetime.datetime.today().month if", "} auth_response = requests.post( url=urljoin(api_def[\"root_url\"], api_def[\"api_defs\"][\"gen_token\"]), data=auth_params) # please take a look at", "with open(os.path.join(os.path.dirname(__file__), \"data\", \"data_{}{}.json\".format(mon, year)), \"w\") as f: json.dump(json.loads(response[1]), f, indent=4) if __name__", "Jan 2020 or in the future. Please choose a month and year between", "requests.get( url=urljoin(api_def[\"root_url\"], api_req_param), headers=auth_token) return ( stats_response.status_code, stats_response.content.decode(\"utf-8\") ) def init(): read_api_def() generate_token()", "person can # use that endpoint. It also helps keep a track of", "\"w\") as f: json.dump(auth_token, f, indent=4) else: print(\"A problem occurred. Code: {}, Message:", "the date time the token was fetched > 50 hours? if no, then", "bearer token, which in simpler terms is a way for it to know", "way for it to know who is making a request and if that", "a change to the API we don't have to touch the code. We", "and year between and including, Jan 2020 and current month and year\") #", "all the month in the 3 letter format. as this is the only", "we bring up our script. However, it is to be noted that once", "url=urljoin(api_def[\"root_url\"], api_req_param), headers=auth_token) return ( stats_response.status_code, stats_response.content.decode(\"utf-8\") ) def init(): read_api_def() generate_token() def", "requires us to authenticate with some form of username and password. 
To this", "in simpler terms is a way for it to know who is making", "\"may\", 6: \"jun\", 7: \"jul\", 8: \"aug\", 9: \"sep\", 10: \"oct\", 11: \"nov\",", "indent=4) else: print(\"A problem occurred. Code: {}, Message: {}\".format( auth_response.status_code, auth_response.content.decode(\"utf-8\"))) raise Exception(\"Problem", "else mon year = datetime.datetime.today().year if year == 0 else year response =", "# check if we have a valid token already in the file if", "have to touch the code. We can just # change the API endpoint,", "then store it for later use. api_def_file_path = os.path.join( os.path.dirname(__file__), \"config\", \"api-def.json\") with", "using it with the code below # to check if the token present", "year > today.year or (year == today.year and month > today.month): raise Exception(", "if year == 0 else year response = get_global_daily_report(mon, year) if response[0] ==", "= { 1: \"jan\", 2: \"feb\", 3: \"mar\", 4: \"apr\", 5: \"may\", 6:", "token, which in simpler terms is a way for it to know who", "if no, then continue using this token token_details = json.load(token_file) token_load_dt_tm = datetime.datetime.strptime(", "\"feb\", 3: \"mar\", 4: \"apr\", 5: \"may\", 6: \"jun\", 7: \"jul\", 8: \"aug\",", "api_def[\"api_defs\"][\"global_daily_reports\"].format( mon=str(months[month]), yyyy=year) auth_token = { \"Authorization\": \"Bearer {0}\".format(bearer_token) } stats_response = requests.get(", "= { \"token\": bearer_token, \"timestamp\": datetime.datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\") } with open(token_file_path, \"w\") as f:", "3600): return # okay we either need to fetch a token from scratch", "the next time we bring up our script. However, it is to be", "then you can refresh it. See a simple example below token_file_path = os.path.join(", "with some form of username and password. To this request, # the API", "when we send it to the server with the bearer_token = \"\" api_def", "that the server sends us has a lifetime of ~55 hours. Hence, we", "f, indent=4) else: print(\"A problem occurred. Code: {}, Message: {}\".format( auth_response.status_code, auth_response.content.decode(\"utf-8\"))) raise", "only # sure way of getting it correct without having to do a", "server to get the data. we also need to api_req_param = api_def[\"api_defs\"][\"global_daily_reports\"].format( mon=str(months[month]),", "and load it the next time we bring up our script. However, it", "= requests.post( url=urljoin(api_def[\"root_url\"], api_def[\"api_defs\"][\"gen_token\"]), data=auth_params) # please take a look at REST response", "a bearer token? in simple words, it is a token we get when", "user has made and also manage telemetry. global bearer_token # the token that", "requests.compat import urljoin import json import os import datetime # https://www.covid19api.dev/#intro # creating", "parsing. months = { 1: \"jan\", 2: \"feb\", 3: \"mar\", 4: \"apr\", 5:", "want to use requires us to authenticate with some form of username and", "we also need to api_req_param = api_def[\"api_defs\"][\"global_daily_reports\"].format( mon=str(months[month]), yyyy=year) auth_token = { \"Authorization\":", "months.keys(): raise Exception( \"Invalid month range! 
please choose a month range between 1-12\")", "token # and load it the next time we bring up our script.", "need to api_req_param = api_def[\"api_defs\"][\"global_daily_reports\"].format( mon=str(months[month]), yyyy=year) auth_token = { \"Authorization\": \"Bearer {0}\".format(bearer_token)", "codes - https://developer.mozilla.org/en-US/docs/Web/HTTP/Status if auth_response.status_code == 200: bearer_token = json.loads( auth_response.content.decode(\"utf-8\"))[\"Document\"] auth_token =", "we don't have to touch the code. We can just # change the", "get_global_daily_report(mon, year) if response[0] == 200: with open(os.path.join(os.path.dirname(__file__), \"data\", \"data_{}{}.json\".format(mon, year)), \"w\") as", "already in the file if force == False and os.path.exists(token_file_path): with open(token_file_path) as", "the server sends us has a lifetime of ~55 hours. Hence, we don't", "be noted that once 55 hours are up, we need to regenerate the", "auth_response.status_code, auth_response.content.decode(\"utf-8\"))) raise Exception(\"Problem with auth\") def get_global_daily_report(month: int, year: int): # get", "mon = datetime.datetime.today().month if mon == 0 else mon year = datetime.datetime.today().year if", "store the token # and load it the next time we bring up", "some code trivially by storing the date and time the token was generated", "if that person can # use that endpoint. It also helps keep a", "without having to do a lot of date parsing. months = { 1:", "# what is a bearer token? in simple words, it is a token", "year between and including, Jan 2020 and current month and year\") # connect", "get when we authenticate with the server. when we send it to the", "= os.path.join( os.path.dirname(__file__), \"config\", \"token.json\") # check if we have a valid token", "# The covid 19 tracking API we want to use requires us to", "format. as this is the only # sure way of getting it correct", "(datetime.datetime.now() - token_load_dt_tm).seconds < (50 * 3600): return # okay we either need", "the code should work the same way. global api_def # read this API", "the token was generated on in the json file itself, and then using", "today.year or (year == today.year and month > today.month): raise Exception( \"Invalid date", "as this is the only # sure way of getting it correct without", "and current month and year\") # connect to the server to get the", "to check if the token present is valid or not. If it is", "date information today = datetime.datetime.today() # check if the month is valid if", "# check if the month is valid if month not in months.keys(): raise", "\"dec\" } # create a global variable for the bearer token. # what", "it. We can just store the token # and load it the next", "} # create a global variable for the bearer token. # what is", "19 tracking API we want to use requires us to authenticate with some", "API we don't have to touch the code. We can just # change", "of username and password. To this request, # the API returns a bearer", "months = { 1: \"jan\", 2: \"feb\", 3: \"mar\", 4: \"apr\", 5: \"may\",", "# use that endpoint. It also helps keep a track of the number", "year\") # connect to the server to get the data. we also need", "6: \"jun\", 7: \"jul\", 8: \"aug\", 9: \"sep\", 10: \"oct\", 11: \"nov\", 12:", "token? 
in simple words, it is a token we get when we authenticate", "or year > today.year or (year == today.year and month > today.month): raise", "simple words, it is a token we get when we authenticate with the", "year = datetime.datetime.today().year if year == 0 else year response = get_global_daily_report(mon, year)", "is a token we get when we authenticate with the server. when we", "old one expired auth_params = { \"username\": api_def[\"username\"], \"password\": api_def[\"password\"] } auth_response =", "the number of requests a user has made and also manage telemetry. global" ]
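The script above reads everything environment-specific from config/api-def.json, which is not shown here. Based only on the keys the code accesses (root_url, username, password, and the api_defs entries gen_token and global_daily_reports with mon/yyyy placeholders), a minimal config could look like the sketch below; the concrete URL and endpoint paths are placeholders, not values taken from the source.

# a minimal sketch of what config/api-def.json might contain; paths and the base URL are assumptions
example_api_def = {
    "root_url": "https://api.covid19api.dev/",           # assumed base URL of the service
    "username": "<your username>",
    "password": "<your password>",
    "api_defs": {
        "gen_token": "token",                             # assumed path of the token endpoint
        "global_daily_reports": "reports/{mon}/{yyyy}"    # assumed path; the code fills mon/yyyy via str.format
    }
}

# dumping the dict with json.dump(example_api_def, f, indent=4) into config/api-def.json
# would give read_api_def() something to load.

With a config like this in place, calling main(4, 2021) requests the April 2021 report and, on a 200 response, writes it to data/data_42021.json next to the script.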
[ "functions of other StateButtons :return: None \"\"\" def disable_buttons() -> None: \"\"\" Disable", "to specify \"grid\" if it needs to be redrawn :param kwargs: See the", "then clear channel, fills are made before blits. All the program's fill and", "the random walls button, 10% of the nodes in the grid will become", "obj in self.objects: if isinstance(obj, Gui): priority_to = obj return priority_to def handle_priority(priority_obj:", "\"\"\" Save the Grid object as a Pickle file in the Grids folder", "Gui: \"\"\" Initialise the main Gui for the visualizer module, most dependency issues", "to pygame def save() -> None: \"\"\" Save the Grid object as a", "disabled buttons before pathfinder starts looping handle_display() pg.display.flip() pathfinder_obj.init_search() def reset(partial: bool =", "the loop position_y = start_height - nodes_height position_x = cfg.button_background_rect.width - nodes_width for", "background, \"ok_butt\": ok_button}) popup.draw_all() return popup class StatsHandler: def __init__(self, background: Background, increment:", "particular cases: def random_walls(self: GridButton) -> None: \"\"\" Function for the random walls", "(cfg.stats_background_rect.x + 15, cfg.stats_background_rect.y + 55), get_rsr_dt), fps_stat=Stat(\"FPS: \", cst.BLACK, (cfg.stats_background_rect.x + 300,", "None) -> bool: \"\"\" Handle clicking events, will recursively pass down click events", "to handle click or handle input methods. TYPING: *additional: (event.type, function(event, *args) ->", "pass return used click = pg.Rect(mouse_pos, (1, 1)) click_used = False prio =", "attributes to the gui, used for dependency injection. \"\"\" self.objects = [] self.events", "+= event.unicode button.dict[\"value\"] = Gui.text_input button.display() def handle_clicks(self, mouse_pos: Tuple[int, int], root: 'Gui'", "is not None: temp = grid_obj.end grid_obj.end = None temp.is_end = False cst.dirty_fills.append(temp.get_fill())", "click_used = True button.is_clicked(gui=self, root=root) elif button.is_activated and button.__class__ is TextInputButton: button.confirm_input(self.text_input) except", "\") main_gui[\"save_grid_button\"] = SystemButton((15, main_gui[\"brush_size_button\"].rect.bottom + 30), \"Save Grid\", save) main_gui[\"load_grid_button\"] = SystemButton((main_gui[\"save_grid_button\"].rect.right", "parameter is always injected in is_clicked :return: None \"\"\" self.is_activated = False for", "user input, the entered text is stored as a class attribute. A TextInputButton", "object as a Pickle file in the Grids folder (or other) :return: None", "stat handler (\"attribute\" = object) and in a list of stats \"\"\" self.__dict__.update(kwargs)", "input. :param event: must be of type pg.KEYDOWN event :return: None \"\"\" for", "display() method and can have an is_clicked() method to handle clicks on Buttons", "the pathfinder\"\"\" return round(pathfinder.neighbors_prep_dt, 2) def get_rsr_dt() -> float: \"\"\" Get the time", "for the diago_allowed Checkbox. Switches the bool of pathfinder.diago attribute. 
:param arg: diago_button.is_activated,", "GUI object to be able to use it elsewhere (pop-ups) main_gui[\"button_background_rect\"] = Background(cst.LIGHT_GREY,", ":param root: Parent/root Gui :param child: Child Gui to remove from parent :return:", "in self.objects: try: if not button.is_disabled: if click.colliderect(button.rect): click_used = True button.is_clicked(gui=self, root=root)", "from the main_gui's grid_n_wide and grid_n_high TextInputButtons' values :return: None \"\"\" grid_obj.generate(main_gui_handler.grid_n_wide_button.dict[\"value\"], main_gui_handler.grid_n_high_button.dict[\"value\"])", ":param background: Background object where the stats will be displayed (positioning is not", "displayed on the popup window :return: A Gui object representing the popup window", "GridButton, this parameter is always injected in is_clicked :return: None \"\"\" self.is_activated =", "cfg.button_background_rect.width - nodes_width for column in grid_obj.all_nodes: position_x += nodes_width for node in", "button.is_clicked(gui=self, root=root) elif button.is_activated and button.__class__ is TextInputButton: button.confirm_input(self.text_input) except AttributeError: pass return", "handle_display() pg.display.flip() pathfinder_obj.init_search() def reset(partial: bool = False) -> None: \"\"\" Stops the", "Background(cst.DARK_GREY, pg.Rect(((cfg.window.get_width() - bg_width) / 2, (cfg.window.get_height() - bg_height) / 2), (bg_width, bg_height)),", "Gui to the main_gui :return: True if pathfinder is ready to run \"\"\"", "\", algo_buttons, child_gui=algo_gui) main_gui[\"diago_button\"] = Checkbox(\"Diagonal moves\", (15, main_gui[\"dropdown_algo\"].rect.bottom + 10), False, diago_func)", "+ 100 / 4), \"OK\", func=ok_func) background = Background(cst.DARK_GREY, pg.Rect(((cfg.window.get_width() - bg_width) /", "def exit_func() -> None: \"\"\" Exit program. :return: None \"\"\" pg.event.post(pg.event.Event(pg.QUIT)) # creating", "try: if not used and not priority_obj.external: used = True if not used", "\"\"\" Call display() method on all of its objects. :param attributes: Call the", "redrawn :param kwargs: See the necessary kwargs above :return: None \"\"\" try: kwargs[\"root\"].objects.remove(kwargs[\"child\"])", "main_gui[\"dropdown_algo\"] = DropDownButton((15, main_gui[\"random_walls_button\"].rect.bottom + 30), \"Algo: \", algo_buttons, child_gui=algo_gui) main_gui[\"diago_button\"] = Checkbox(\"Diagonal", "in the grid will become walls :param self: random_walls button object. For GridButton,", "fixed by injecting the necessary objects as arguments. First define the necessary functions", "None \"\"\" pathfinder_obj.display = arg # if \"wait_time_button\" in gui.__dict__.keys() and \"run_interval_button\" in", "False, and display the buttons the show change. 
:param arg: display_moves_button.is_activated, For Checkboxes", "10% of the nodes in the grid will become walls :param self: random_walls", "= join(folder_path, \"Grids\") if not exists(grid_path): mkdir(grid_path) def remove_from_root(*attributes, **kwargs) -> None: \"\"\"", "= False remove_from_root(root=self, child=priority_obj) except AttributeError: pass return used click = pg.Rect(mouse_pos, (1,", "walls button, 10% of the nodes in the grid will become walls :param", "Creates a gui window or screen, if a Gui object is added to", "25 # Substracting the first because it will be incremented during the loop", "1: Gui.text_input = \"\" else: Gui.text_input = Gui.text_input[:-1] button.dict[\"value\"] = Gui.text_input elif event.key", "self.__dict__[f\"{obj.__class__}_group\"].append(obj) self.__dict__.update(kwargs) def draw_all(self, *attributes: str) -> None: \"\"\" Call display() method on", "-> None: \"\"\" Scale the grid object to fit current screen size, draw", "10), False, apply_rsr_func) main_gui[\"display_moves_button\"] = Checkbox(\"Display moves\", (15, main_gui[\"apply_rsr_button\"].rect.bottom + 10), True, disp_moves_func)", "from tkinter import filedialog from random import randrange from typing import * import", "- 25, \"default\": 100, \"value\": 100}, (15, main_gui[\"play_pause_button\"].rect.bottom + 30), 50, \"Nodes in", "self.increment = increment def display(self) -> None: \"\"\" Display all Stat object in", "DropDownButton and any child Gui that might be spawned during the program :return:", "/ 2, (bg_height - text_surf.get_height()) / 3)) dimension_butt = Button((0, 0), \"OK\") ok_button", "/ 2 + 100 / 4), \"OK\", func=ok_func) background = Background(cst.DARK_GREY, pg.Rect(((cfg.window.get_width() -", "will receive parameters (event, args). (I couldn't get the typing right...) :return: None", "be activated for this function to run, once the Enter key is pressed,", "\"Nodes in width: \", func=generate) main_gui[\"grid_n_high_button\"] = TextInputButton({\"min\": 3, \"max\": cfg.window.get_height() - 125", "changes :return: None \"\"\" def scale_and_draw() -> None: \"\"\" Scale the grid object", "event: must be of type pg.KEYDOWN event :return: None \"\"\" for button in", "def draw_all(self, *attributes: str) -> None: \"\"\" Call display() method on all of", "For Checkboxes this parameter is always injected in is_clicked :return: None \"\"\" pathfinder_obj.display", "= \\ not main_gui_handler.display_moves_button.is_activated # TODO: try resetting the focus to pygame def", "*args) self.events.clear() def handle_input(self, event: pg.event.Event): \"\"\" Process Keyboard user input, the entered", "None: \"\"\" Removes the popup_gui from the root/parent Gui. 
Disable the grid from", "pathfinder_obj.dijkstra_cost_so_far = 0 pathfinder_obj.running = False pathfinder_obj.path_found = False pathfinder_obj.frontier = [] pathfinder_obj.queue.clear()", "import filedialog from random import randrange from typing import * import pygame as", "not partial: node.status &= ~(Node.WALL | Node.END | Node.START) node.status &= ~(Node.SYM_RECT |", "= filedialog.asksaveasfilename(initialdir=grid_path, defaultextension=\".pickle\") if direct: save_object = {\"start\": grid_obj.start, \"end\": grid_obj.end, \"grid\": grid_obj.all_nodes}", "not arg main_gui_handler.wait_time_button.display() main_gui_handler.run_interval_button.display() except KeyError: pass def diago_func(arg: bool) -> None: \"\"\"", "\"\"\" If priority object was found look if any clicks affect it, clicking", "not partial: if grid_obj.start is not None: temp = grid_obj.start grid_obj.start = None", "it will pass down its click events to it recursively until used. Specific", "cst.dirty_fills.append(temp.get_fill()) pathfinder_obj.search_is_init = False pathfinder_obj.dijkstra_cost_so_far = 0 pathfinder_obj.running = False pathfinder_obj.path_found = False", "as a Pickle file in the Grids folder (or other) :return: None \"\"\"", "objects is clicked, the object's is_clicked() method will be called if it has", "Switches the pathfinder.running attribute on and off on every press if run conditions", "will pass down its click events to it recursively until used. Specific kwargs:", "config as cfg import constants as cst from classes import * folder_path =", "method and can have an is_clicked() method to handle clicks on Buttons (see", "to it recursively until used. Specific kwargs: - external=True Allows clicks outside the", "GridButton((15, 25), \"Place Start\") main_gui[\"end_node_button\"] = GridButton((main_gui[\"start_node_button\"].rect.right + 5, main_gui[\"start_node_button\"].rect.top), \"Place End\") main_gui[\"draw_walls_button\"]", "used, else False \"\"\" def check_priority() -> Union[DropDownButton, Gui]: \"\"\" Check if any", "grid placement buttons main_gui[\"start_node_button\"] = GridButton((15, 25), \"Place Start\") main_gui[\"end_node_button\"] = GridButton((main_gui[\"start_node_button\"].rect.right +", "for obj in main_gui_handler.objects: if obj.__class__ is not StateButton and obj is not", "that cannot be used during pathfinding :param arg: Not needed, but is included", "partial: True if resetting search, False if resetting grid :return: None\"\"\" pathfinder_obj.running =", "typed to avoid import) :return: StatsHandler object \"\"\" # defining stats getters def", "Switches the bool of pathfinder.display_steps attribute. 
Disables the run interval and wait time", "node.rect = pg.rect.Rect(node.position, (node.width, node.height)) position_y = start_height - nodes_height grid_obj.display() def update_values(save_object:", "Text to be displayed on the popup window :return: A Gui object representing", "\"\"\" from pickle import load, dump from os import getcwd, mkdir from os.path", "== pg.K_RETURN: button.confirm_input(Gui.text_input) Gui.text_input = \"\" else: Gui.text_input += event.unicode button.dict[\"value\"] = Gui.text_input", "in self.objects: if isinstance(obj, Gui): priority_to = obj return priority_to def handle_priority(priority_obj: Union[DropDownButton,", "Load a grid object from the Grids folder (or other), update values, scale", "+ 10), \"Play/Pause\", play_pause) main_gui[\"grid_n_wide_button\"] = TextInputButton({\"min\": 3, \"max\": cfg.window.get_width() - 205 -", "this parameter is always injected in is_clicked :return: None \"\"\" pathfinder_obj.algo = self.algo", "and blits orders are appended to one of the lists in cst.to_display (see", "main_gui[\"run_interval_button\"] = TextInputButton({\"min\": -1, \"max\": 9999, \"default\": 0, \"value\": 0}, (15, main_gui[\"display_moves_button\"].rect.bottom +", "\"\"\" grid_obj.all_nodes = save_object[\"grid\"] grid_obj.start = save_object[\"start\"] grid_obj.end = save_object[\"end\"] main_gui_handler.grid_n_wide_button.dict[\"value\"] = len(grid_obj.all_nodes)", "object, with injected dependency to the pathfinder to access stats values. First define", "(cfg.stats_background_rect.x + 300, cfg.stats_background_rect.y + 15), get_fps), path_length=Stat(\"Path length: \", cst.BLACK, (cfg.stats_background_rect.x +", "Display all Stat object in self.stats. :return: None \"\"\" self.background.display() for stat in", "nodes It also creates all buttons by the init_gui function \"\"\" from pickle", "node and an ending node. If no end or start node is defined,", "be able to use it elsewhere (pop-ups) main_gui[\"button_background_rect\"] = Background(cst.LIGHT_GREY, cfg.button_background_rect) # grid", "randrange from typing import * import pygame as pg import config as cfg", "temp.is_end = False cst.dirty_fills.append(temp.get_fill()) pathfinder_obj.search_is_init = False pathfinder_obj.dijkstra_cost_so_far = 0 pathfinder_obj.running = False", "Gui): # Inject parent Gui dependency as root, to allow the child Gui", "if run conditions are met. Call pathfinder.init_search methode if pathfinder.search_is_init is False. Disable", "grid object to fit current screen size, draw the grid :return: None \"\"\"", "get_fps), path_length=Stat(\"Path length: \", cst.BLACK, (cfg.stats_background_rect.x + 300, cfg.stats_background_rect.y + 35), get_path_len)) return", "= True if not used and priority_obj.ext_close: priority_obj.src_butt.is_activated = False remove_from_root(root=self, child=priority_obj) except", "all buttons by the init_gui function \"\"\" from pickle import load, dump from", "if \"wait_time_button\" in gui.__dict__.keys() and \"run_interval_button\" in gui.__dict__.keys(): try: main_gui_handler.wait_time_button.is_disabled = not arg", "run, once the Enter key is pressed, it's confirm_input(self.text_input) method will be called", "depend on the previous ones Last we create the Gui from the dict,", "from the pathfinder or the time since it started processing\"\"\" return round(pathfinder.algo_dt, 2)", "pathfinder.diago attribute. 
:param arg: diago_button.is_activated, For Checkboxes this parameter is always injected in", "if grid_obj.start: if grid_obj.end: return True else: pg.event.post(pg.event.Event(cst.NO_END, announcement=\"No end Node!\")) else: pg.event.post(pg.event.Event(cst.NO_START,", "try: obj.is_disabled = False obj.display() except AttributeError: # (Backgrounds) continue main_gui_handler.run_interval_button.is_disabled = \\", "from the pathfinder\"\"\" return round(pathfinder.rsr_prep_dt, 2) def get_path_len() -> float: \"\"\" Get the", "is TextInputButton: button.confirm_input(self.text_input) except AttributeError: pass return click_used # TODO: alot of setter", "= len(grid_obj.all_nodes[0]) main_gui_handler.grid_n_wide_button.display() main_gui_handler.grid_n_high_button.display() tkinter.Tk().withdraw() direct = filedialog.askopenfilename(initialdir=grid_path) if direct: with open(direct, \"rb\")", "input and redraw it to cover the popup :param root: Parent/root Gui :param", "= 200, **kwargs: Stat) -> None: \"\"\" Creates a Singleton Stats handler for", "pathfinder.display_steps attribute. Disables the run interval and wait time buttons of the main_gui", "= False cst.dirty_fills.append(temp.get_fill()) if grid_obj.end is not None: temp = grid_obj.end grid_obj.end =", "redraw it to cover the popup necessary kwargs: - 'root': Parent/root Gui -", "None temp.is_start = False cst.dirty_fills.append(temp.get_fill()) if grid_obj.end is not None: temp = grid_obj.end", "Check if any of the Gui's objects require priority on clicks (currently only", "pass return click_used # TODO: alot of setter functions could be shifted into", "not automatic) :param increment: Delay between updates of the stats in ms :param", "pathfinder or the time since it started processing\"\"\" return round(pathfinder.algo_dt, 2) def get_neighbor_dt()", "itself from the parent's objects once terminated :return: True if click was used,", "reset) main_gui[\"reset_search_button\"] = StateButton((main_gui[\"reset_button\"].rect.right + 5, main_gui[\"reset_button\"].rect.top), \"Reset Search\", reset, True) main_gui[\"play_pause_button\"] =", "self.handle_clicks(pg.mouse.get_pos()) elif event.type == pg.KEYDOWN: self.handle_input(event) for user_event, func, *args in additional: if", "tuples to handle other events. The function will receive parameters (event, args). 
(I", "allows the child to remove itself from the parent's objects once terminated :return:", "\"\"\" Function for the random walls button, 10% of the nodes in the", "events to child Gui if one is in its objects (LIMITED TO ONE", "for obj in self.objects: if isinstance(obj, Gui): priority_to = obj return priority_to def", "cst.BLACK, (cfg.stats_background_rect.x + 15, cfg.stats_background_rect.y + 35), get_neighbor_dt), rsr_prep_time=Stat(\"RSR Preprocess (ms): \", cst.BLACK,", "False obj.display() except AttributeError: # (Backgrounds) continue main_gui_handler.run_interval_button.is_disabled = \\ main_gui_handler.run_interval_button.is_disabled = \\", "not priority_obj.external: used = True if not used and priority_obj.ext_close: priority_obj.src_butt.is_activated = False", "+ 10), \"Random walls\", func=random_walls) # algo buttons algo_buttons = [AlgoButton((0, 0), \"Flood", "200, **kwargs: Stat) -> None: \"\"\" Creates a Singleton Stats handler for displaying", "\"A*\", \"astar\", active_color=cst.BLACK, rounded=False, func=set_algo), AlgoButton((0, 0), \"Dijkstra\", \"dijkstra\", active_color=cst.BLACK, rounded=False, func=set_algo)] algo_buttons[0].is_activated", "+ 10), \"Draw walls\") main_gui[\"erase_walls_button\"] = GridButton((main_gui[\"draw_walls_button\"].rect.right + 5, main_gui[\"draw_walls_button\"].rect.top), \"Erase walls\") main_gui[\"random_walls_button\"]", "pathfinder_obj.running # could add pause/unpause timers... if not pathfinder_obj.search_is_init: disable_buttons() if not pathfinder_obj.display", "the stats in ms :param kwargs: add stat objects as attributes of the", "walls\") main_gui[\"random_walls_button\"] = GridButton((15, main_gui[\"draw_walls_button\"].rect.bottom + 10), \"Random walls\", func=random_walls) # algo buttons", "(position_x, position_y) node.rect = pg.rect.Rect(node.position, (node.width, node.height)) position_y = start_height - nodes_height grid_obj.display()", "the bool of pathfinder.display_steps attribute. Disables the run interval and wait time buttons", ":return: None \"\"\" grid_obj.generate(main_gui_handler.grid_n_wide_button.dict[\"value\"], main_gui_handler.grid_n_high_button.dict[\"value\"]) def play_pause(arg: bool = None) -> None: \"\"\"", "current screen size, draw the grid :return: None \"\"\" # scale grid to", "its click events to it recursively until used. 
Specific kwargs: - external=True Allows", "save_object: save object loaded from pickle file :return: None \"\"\" grid_obj.all_nodes = save_object[\"grid\"]", "Stat object as kwargs for the StatsHandler :param pathfinder: Pathfinder object of the", "disable_buttons() if not pathfinder_obj.display or main_gui_handler.run_interval_button.dict[\"value\"] == -1: # update display to show", "for displaying Stat objects on a Background (Background is important so the anti", "self.events = [] for name, obj in dict_object.items(): setattr(self, name, obj) setattr(self, f\"{obj.__class__}_group\",", "load_grid) main_gui[\"exit_button\"] = SystemButton((15, main_gui[\"load_grid_button\"].rect.bottom + 30), \"Exit\", exit_func) main_gui_handler = Gui(main_gui, pathfinder=pathfinder_obj,", "that might be spawned during the program :return: object with priority \"\"\" priority_to", "its objects, it will pass down its click events to it recursively until", "check_conditions() -> bool: \"\"\" Check that an algorithm is defined, the grid has", "background: Background object where the stats will be displayed (positioning is not automatic)", "100}, (15, main_gui[\"grid_n_wide_button\"].rect.bottom + 10), 40, \"Nodes in height: \", func=generate) main_gui[\"brush_size_button\"] =", "Instantiate the Stat object as kwargs for the StatsHandler :param pathfinder: Pathfinder object", "from typing import * import pygame as pg import config as cfg import", "method of the grid object, and injects the n_wide and n_high dependencies from", "exit_func) main_gui_handler = Gui(main_gui, pathfinder=pathfinder_obj, grid=grid_obj) return main_gui_handler def handle_display() -> None: \"\"\"", "def __init__(self, dict_object: Dict[str, Any], **kwargs: Any) -> None: \"\"\" Creates a gui", "Checkbox(\"Display moves\", (15, main_gui[\"apply_rsr_button\"].rect.bottom + 10), True, disp_moves_func) main_gui[\"run_interval_button\"] = TextInputButton({\"min\": -1, \"max\":", "defaultextension=\".pickle\") if direct: save_object = {\"start\": grid_obj.start, \"end\": grid_obj.end, \"grid\": grid_obj.all_nodes} with open(direct,", "forbidden for child Gui :param priority_obj: The object with priority :return: True if", "grid go out of borders nodes_width = grid_obj.width / main_gui_handler.grid_n_wide_button.dict[\"value\"] nodes_height = grid_obj.height", "function to run, once the Enter key is pressed, it's confirm_input(self.text_input) method will", "= filedialog.askopenfilename(initialdir=grid_path) if direct: with open(direct, \"rb\") as file: save_object_ = load(file) update_values(save_object_)", "parent Gui. Use as follows: from the main Gui, on event: main_Gui.objects.append(pop_up(\"hello\")) :param", "Get algorithm process time from the pathfinder or the time since it started", "object was found look if any clicks affect it, clicking outside of a", "def pop_up(announcement: str) -> Gui: \"\"\" Creates a Pop-up window Gui with a", "GridButton((15, main_gui[\"start_node_button\"].rect.bottom + 10), \"Draw walls\") main_gui[\"erase_walls_button\"] = GridButton((main_gui[\"draw_walls_button\"].rect.right + 5, main_gui[\"draw_walls_button\"].rect.top), \"Erase", "in its objects (LIMITED TO ONE CHILD GUI). 
If any of its objects", "clicking outside of a DropDownButton's rect is allowed and clicks will be registered,", "into a get_data(gui) method by the pathfinder to unclutter this # module and", "self.chrono += self.increment return True return False def main(self) -> None: \"\"\" Main", "not button.is_disabled: if click.colliderect(button.rect): click_used = True button.is_clicked(gui=self, root=root) elif button.is_activated and button.__class__", "not None: temp = grid_obj.end grid_obj.end = None temp.is_end = False cst.dirty_fills.append(temp.get_fill()) pathfinder_obj.search_is_init", "the grid from receiving input and redraw it to cover the popup necessary", "root: Parent/root Gui :param child: Child Gui to remove from parent :return: None", "scale grid to screen, as well as possible, might make grid go out", "as file: save_object_ = load(file) update_values(save_object_) scale_and_draw() def exit_func() -> None: \"\"\" Exit", "activated for this function to run, once the Enter key is pressed, it's", "priority \"\"\" priority_to = None for obj in self.objects: if isinstance(obj, Gui): priority_to", "is not grid_obj.start and node is not grid_obj.end: node.status |= Node.WALL cst.dirty_fills.append(node.get_fill()) def", "= len(grid_obj.all_nodes) main_gui_handler.grid_n_high_button.dict[\"value\"] = len(grid_obj.all_nodes[0]) main_gui_handler.grid_n_wide_button.display() main_gui_handler.grid_n_high_button.display() tkinter.Tk().withdraw() direct = filedialog.askopenfilename(initialdir=grid_path) if direct:", "needs to be redrawn :param kwargs: See the necessary kwargs above :return: None", "not StateButton and obj is not main_gui_handler.exit_button: try: obj.is_disabled = True except AttributeError:", "pygame def save() -> None: \"\"\" Save the Grid object as a Pickle", "on event: main_Gui.objects.append(pop_up(\"hello\")) :param announcement: Text to be displayed on the popup window", "Button((0, 0), \"OK\") ok_button = OkButton(((cfg.window.get_width() - dimension_butt.rect.w) / 2, (cfg.window.get_height() - dimension_butt.rect.h)", "0}, (main_gui[\"run_interval_button\"].rect.right + 5, main_gui[\"display_moves_button\"].rect.bottom + 10), 40, \"Wait: \") main_gui[\"reset_button\"] = StateButton((15,", "the algorithm associated with the AlgoButton :param self: inject reference to self. For", "~(Node.SYM_RECT | Node.BORDER | Node.VISITED | Node.PATH) cst.dirty_fills.append(node.get_fill()) for obj in main_gui_handler.objects: try:", "\"\"\" Removes the popup_gui from the root/parent Gui. Disable the grid from receiving", "will be registered, but is forbidden for child Gui :param priority_obj: The object", "+ 55), get_rsr_dt), fps_stat=Stat(\"FPS: \", cst.BLACK, (cfg.stats_background_rect.x + 300, cfg.stats_background_rect.y + 15), get_fps),", "None \"\"\" self.background.display() for stat in self.stats: stat.display() def timer(self) -> bool: \"\"\"", "Gui from parent Gui on external clicks :param dict_object: all objects in dict_object", "main_gui_handler.grid_n_high_button.dict[\"value\"]) def play_pause(arg: bool = None) -> None: \"\"\" Switches the pathfinder.running attribute", "Background(cst.LIGHT_GREY, cfg.button_background_rect) # grid placement buttons main_gui[\"start_node_button\"] = GridButton((15, 25), \"Place Start\") main_gui[\"end_node_button\"]", "for the display moves Checkbox. Switches the bool of pathfinder.display_steps attribute. 
Disables the", "of the stats handler, it's the only thing that needs to be called", "mouse_pos: Tuple[int, int], root: 'Gui' = None) -> bool: \"\"\" Handle clicking events,", "necessary attributes, also reset the grid. If partial, leaves walls, start and end", "added to the Gui. Second, create a dict of all the objects to", "# module and remove some LOC... def init_gui(pathfinder_obj: Any, grid_obj: Grid) -> Gui:", "will become walls :param self: random_walls button object. For GridButton, this parameter is", "node.status |= Node.WALL cst.dirty_fills.append(node.get_fill()) def disp_moves_func(arg: bool) -> None: \"\"\" Function for the", "Get the fps of the program\"\"\" return round(cfg.clock.get_fps(), 1) stat_handler = StatsHandler( background=Background(cst.LIGHT_GREY,", "import randrange from typing import * import pygame as pg import config as", "Main loop of the stats handler, it's the only thing that needs to", "some LOC... def init_gui(pathfinder_obj: Any, grid_obj: Grid) -> Gui: \"\"\" Initialise the main", ":return: None \"\"\" for obj in main_gui_handler.objects: if obj.__class__ is not StateButton and", "9999, \"default\": 0, \"value\": 0}, (15, main_gui[\"display_moves_button\"].rect.bottom + 10), 40, \"Run: \") main_gui[\"wait_time_button\"]", "child Gui if one is in its objects (LIMITED TO ONE CHILD GUI).", "to be displayed on the popup window :return: A Gui object representing the", "objects as attributes of the stat handler (\"attribute\" = object) and in a", "Second, create a dict of all the objects to be added to the", "\"dijkstra\", active_color=cst.BLACK, rounded=False, func=set_algo)] algo_buttons[0].is_activated = True algo_gui = Gui({f\"{button.algo}\": button for button", ":param pathfinder_obj: Pathfinder object to link to the Gui (class is not typed", "Gui :param child: Child Gui to remove from parent :return: None \"\"\" root.grid.disabled", "the entered text is stored as a class attribute. A TextInputButton must be", "external=True, ext_close=True) main_gui[\"dropdown_algo\"] = DropDownButton((15, main_gui[\"random_walls_button\"].rect.bottom + 30), \"Algo: \", algo_buttons, child_gui=algo_gui) main_gui[\"diago_button\"]", "\"\"\" def ok_func(root: Gui, child: Gui) -> None: \"\"\" Removes the popup_gui from", ":param increment: Delay between updates of the stats in ms :param kwargs: add", "5, main_gui[\"draw_walls_button\"].rect.top), \"Erase walls\") main_gui[\"random_walls_button\"] = GridButton((15, main_gui[\"draw_walls_button\"].rect.bottom + 10), \"Random walls\", func=random_walls)", "nodes_width for node in column: position_y += nodes_height node.height = nodes_height node.width =", "in self.stats. 
:return: None \"\"\" self.background.display() for stat in self.stats: stat.display() def timer(self)", "\"Wait: \") main_gui[\"reset_button\"] = StateButton((15, main_gui[\"run_interval_button\"].rect.bottom + 30), \"Reset Grid\", reset) main_gui[\"reset_search_button\"] =", "\"wait_time_button\" in gui.__dict__.keys() and \"run_interval_button\" in gui.__dict__.keys(): try: main_gui_handler.wait_time_button.is_disabled = not arg main_gui_handler.run_interval_button.is_disabled", "Function for the random walls button, 10% of the nodes in the grid", "\"Save Grid\", save) main_gui[\"load_grid_button\"] = SystemButton((main_gui[\"save_grid_button\"].rect.right + 5, main_gui[\"save_grid_button\"].rect.top), \"Load Grid\", load_grid) main_gui[\"exit_button\"]", "False if check_conditions(): pathfinder_obj.running = not pathfinder_obj.running # could add pause/unpause timers... if", "position_y += nodes_height node.height = nodes_height node.width = nodes_width node.position = (position_x, position_y)", "for the apply_rsr Checkbox. Switches the bool of pathfinder.apply_rsr attribute. :param arg: apply_rsr_button.is_activated,", "= SystemButton((15, main_gui[\"load_grid_button\"].rect.bottom + 30), \"Exit\", exit_func) main_gui_handler = Gui(main_gui, pathfinder=pathfinder_obj, grid=grid_obj) return", "\"value\": 1}, (15, main_gui[\"grid_n_high_button\"].rect.bottom + 10), 30, \"Brush size: \") main_gui[\"save_grid_button\"] = SystemButton((15,", "Grid\", save) main_gui[\"load_grid_button\"] = SystemButton((main_gui[\"save_grid_button\"].rect.right + 5, main_gui[\"save_grid_button\"].rect.top), \"Load Grid\", load_grid) main_gui[\"exit_button\"] =", "= False pathfinder_obj.frontier = [] pathfinder_obj.queue.clear() pathfinder_obj.to_be_removed = [] pathfinder_obj.shortest_path = [] pathfinder_obj.run_timer", "def diago_func(arg: bool) -> None: \"\"\" Function for the diago_allowed Checkbox. Switches the", "cfg import constants as cst from classes import * folder_path = getcwd() grid_path", "def generate() -> None: \"\"\" Calls the generate method of the grid object,", "the root/parent Gui. Disable the grid from receiving input and redraw it to", "the popup window \"\"\" def ok_func(root: Gui, child: Gui) -> None: \"\"\" Removes", "10), 40, \"Nodes in height: \", func=generate) main_gui[\"brush_size_button\"] = TextInputButton({\"min\": 1, \"max\": 200,", "= not pathfinder_obj.running # could add pause/unpause timers... if not pathfinder_obj.search_is_init: disable_buttons() if", "the Gui's objects require priority on clicks (currently only for DropDownButton and any", "button.display() def handle_clicks(self, mouse_pos: Tuple[int, int], root: 'Gui' = None) -> bool: \"\"\"", "(15, main_gui[\"play_pause_button\"].rect.bottom + 30), 50, \"Nodes in width: \", func=generate) main_gui[\"grid_n_high_button\"] = TextInputButton({\"min\":", "if pathfinder is ready to run \"\"\" if pathfinder_obj.algo: if grid_obj.start: if grid_obj.end:", "main_gui_handler.grid_n_wide_button.display() main_gui_handler.grid_n_high_button.display() tkinter.Tk().withdraw() direct = filedialog.askopenfilename(initialdir=grid_path) if direct: with open(direct, \"rb\") as file:", "load_grid() -> None: \"\"\" Load a grid object from the Grids folder (or", "position_y) node.rect = pg.rect.Rect(node.position, (node.width, node.height)) position_y = start_height - nodes_height grid_obj.display() def", "its objects. 
class Gui holds every widget of a screen and dispatches display and click calls to them. The class attribute text_input = "" accumulates keyboard input for the currently activated TextInputButton.

Gui.__init__(self, dict_object, **kwargs) creates a gui window or screen; if another Gui object is added to its objects, click events are passed down to it recursively until used. Specific kwargs: external=True allows clicks outside the Gui's objects, and ext_close=True removes the Gui from its parent Gui on external clicks. Every value in dict_object must have a display() method and may have an is_clicked() method to handle clicks on it (see classes.py for the prefabricated classes to use); extra kwargs become plain attributes of the gui.

    class Gui:
        text_input = ""

        def __init__(self, dict_object, **kwargs):
            self.objects = []
            self.events = []
            for name, obj in dict_object.items():
                setattr(self, name, obj)
                setattr(self, f"{obj.__class__}_group", [])
                self.objects.append(obj)
            for obj in self.objects:
                self.__dict__[f"{obj.__class__}_group"].append(obj)
            self.__dict__.update(kwargs)

        def draw_all(self, *attributes: str) -> None:
            """Call the display() method on all objects, and on any additional named attributes of the gui."""
            for obj in self.objects:
                obj.display()
            for key in attributes:
                self.__dict__[key].display()
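Below is a minimal, dependency-free sketch of the dict-to-attributes constructor pattern; MiniGui and Label are hypothetical stand-ins for the project's classes, not part of the module.

    class Label:
        def __init__(self, text):
            self.text = text

        def display(self):
            print("drawing", self.text)

    class MiniGui:
        def __init__(self, dict_object, **kwargs):
            self.objects = []
            for name, obj in dict_object.items():
                setattr(self, name, obj)      # widget reachable as gui.<name>
                self.objects.append(obj)      # and iterable for draw_all()
            self.__dict__.update(kwargs)      # injected dependencies become attributes

        def draw_all(self):
            for obj in self.objects:
                obj.display()

    gui = MiniGui({"title": Label("Pathfinder"), "hint": Label("click to start")}, grid="grid-object")
    gui.draw_all()      # drawing Pathfinder / drawing click to start
    print(gui.grid)     # injected kwarg available as an attribute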
Gui.handle_events(self, *additional) handles click and keyboard input events by redistributing them to the handle_clicks or handle_input methods. Each extra argument is an (event.type, function, *args) tuple for handling other event types; the function receives (event, *args).

    def handle_events(self, *additional: Any) -> None:
        for event in self.events:
            if event.type == pg.MOUSEBUTTONDOWN:
                self.handle_clicks(pg.mouse.get_pos())
            elif event.type == pg.KEYDOWN:
                self.handle_input(event)
            for user_event, func, *args in additional:
                if event.type == user_event:
                    func(event, *args)
        self.events.clear()

Gui.handle_input(self, event) processes text entry for an activated TextInputButton; the entered text is stored in the Gui.text_input class attribute. A TextInputButton must be activated for this method to do anything: backspace trims the stored text, any other key appends event.unicode and updates the button's displayed value, and once the Enter key is pressed the button's confirm_input(Gui.text_input) method is called with the accumulated input. The event must be a pg.KEYDOWN event.

Gui.handle_clicks(self, mouse_pos, root=None) handles clicking events and recursively passes clicks down to a child Gui if one is in its objects (limited to one child Gui). If any of its objects is clicked, the object's is_clicked() method is called when it has one; the root parameter injects the parent Gui so a child can remove itself from the parent's objects once terminated. It returns True if the click was used, else False. Two inner helpers do the work: check_priority() returns the object that requires priority on clicks (currently a DropDownButton or a child Gui spawned during the program), and handle_priority(priority_obj) forwards the click to that object. Clicking outside a DropDownButton's rect is allowed and still registered, but clicking outside the child-most Gui is forbidden, and when the priority object has ext_close=True it is removed from its root via remove_from_root(root=self, child=priority_obj). If no priority object used the click, the method builds click = pg.Rect(mouse_pos, (1, 1)) and walks self.objects: a collision with an enabled button triggers button.is_clicked(gui=self, root=root), while an activated TextInputButton that was not hit gets button.confirm_input(self.text_input).
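The snippet below is a toy dispatcher showing how the (event.type, function, *args) tuples passed to handle_events are meant to be used; Event and the event-type constants are stand-ins here, not pygame's.

    from dataclasses import dataclass
    from typing import Any, Callable, List, Tuple

    MOUSEBUTTONDOWN, KEYDOWN, USER_POPUP = 1, 2, 42

    @dataclass
    class Event:
        type: int
        data: Any = None

    def dispatch(events: List[Event], *additional: Tuple[int, Callable, Any]) -> None:
        for event in events:
            if event.type == MOUSEBUTTONDOWN:
                print("-> handle_clicks")
            elif event.type == KEYDOWN:
                print("-> handle_input")
            for user_event, func, *args in additional:
                if event.type == user_event:
                    func(event, *args)      # extra handlers receive (event, *args)
        events.clear()

    def show_popup(event: Event, prefix: str) -> None:
        print(prefix, event.data)

    dispatch([Event(MOUSEBUTTONDOWN), Event(USER_POPUP, "No end Node!")],
             (USER_POPUP, show_popup, "popup:"))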
pop_up(announcement) creates a pop-up window Gui with a single OK button to dismiss the message and remove the Gui from its parent Gui. Use it as follows: from the main Gui, on an event, call main_Gui.objects.append(pop_up("hello")). The announcement text is rendered with cst.big_text_font in cst.RED, a Background of cst.DARK_GREY twice as wide and four times as high as the text is centred on the window, and the OK Button's func is an inner ok_func(root, child) that briefly disables the grid from receiving input (root.grid.disabled = pg.time.get_ticks() + 100) and calls remove_from_root("grid", root=root, child=child) so the grid is redrawn over the popup. The assembled Gui({"popup_bg": background, "ok_butt": ok_button}) is drawn once with draw_all() and returned.

class StatsHandler is a singleton handler for displaying Stat objects on a Background (the Background is important for the anti-aliased text). Its __init__(self, background, increment=200, **kwargs) stores each Stat both as an attribute of the handler and in a list of stats, together with the background, a chrono counter and the update increment; positioning of the stats on the background is not automatic.

    class StatsHandler:
        def __init__(self, background: Background, increment: int = 200, **kwargs: Stat) -> None:
            self.__dict__.update(kwargs)
            self.stats = [obj for obj in self.__dict__.values() if obj.__class__ is Stat]
            self.background = background
            self.chrono = 0
            self.increment = increment

        def display(self) -> None:
            """Display all Stat objects in self.stats on top of the background."""
            self.background.display()
            for stat in self.stats:
                stat.display()

Its timer() method handles the timing of the stats handler by comparing pg.time.get_ticks() against the chrono and increment and returning True when it is time to refresh, and the handler's main-loop method is the only thing that needs to be called once it has been initialised: if self.timer(): self.display().
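The exact body of the project's timer() is not shown above, so the following is only an assumed equivalent of the chrono/increment throttling, using time.monotonic() in place of pg.time.get_ticks() so it runs standalone.

    import time

    class ThrottledDisplay:
        def __init__(self, increment_ms: int = 200):
            self.increment = increment_ms
            self.chrono = 0

        def ticks(self) -> int:
            return int(time.monotonic() * 1000)

        def timer(self) -> bool:
            """Return True at most once per `increment` milliseconds."""
            now = self.ticks()
            if now >= self.chrono + self.increment:
                self.chrono = now
                return True
            return False

    stats = ThrottledDisplay(200)
    for _ in range(3):
        if stats.timer():
            print("refresh stats")
        time.sleep(0.1)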
init_stats(pathfinder) initialises the StatsHandler object, with an injected dependency on the pathfinder to access the stat values (the Pathfinder class is not typed, to avoid an import). It first defines the getter functions for the Stat objects, then instantiates the Stat objects as kwargs for the StatsHandler.

    def init_stats(pathfinder: Any) -> StatsHandler:
        def get_algo_dt() -> float:
            """Algorithm process time from the pathfinder, or the time since it started processing."""
            return round(pathfinder.algo_dt, 2)

        def get_neighbor_dt() -> float:
            """Time taken to preprocess the grid's nodes' neighbors."""
            return round(pathfinder.neighbors_prep_dt, 2)

        def get_rsr_dt() -> float:
            """Time taken to preprocess Rectangular Symmetry Reduction."""
            return round(pathfinder.rsr_prep_dt, 2)

        def get_path_len() -> int:
            """Length of the shortest path found by the pathfinder."""
            return len(pathfinder.shortest_path)

        def get_fps() -> float:
            """Current FPS of the program."""
            return round(cfg.clock.get_fps(), 1)

        stat_handler = StatsHandler(
            background=Background(cst.LIGHT_GREY, cfg.stats_background_rect),
            increment=200,
            process_time=Stat("Process time (ms): ", cst.BLACK,
                              (cfg.stats_background_rect.x + 15, cfg.stats_background_rect.y + 15), get_algo_dt),
            neighbor_prep_time=Stat("Neighbors Preprocess (ms): ", cst.BLACK,
                                    (cfg.stats_background_rect.x + 15, cfg.stats_background_rect.y + 35), get_neighbor_dt),
            rsr_prep_time=Stat("RSR Preprocess (ms): ", cst.BLACK,
                               (cfg.stats_background_rect.x + 15, cfg.stats_background_rect.y + 55), get_rsr_dt),
            fps_stat=Stat("FPS: ", cst.BLACK,
                          (cfg.stats_background_rect.x + 300, cfg.stats_background_rect.y + 15), get_fps),
            ...)
        return stat_handler

A path_length Stat labelled "Path length: " and wired to get_path_len is registered in the same call, positioned in the right-hand column at cfg.stats_background_rect.x + 300.
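The getter-injection pattern used by init_stats is easy to see in isolation: each Stat holds a zero-argument callable and reads a fresh value on every refresh. PathfinderStub and the simplified Stat below are stand-ins, not the project's classes.

    from typing import Callable

    class PathfinderStub:
        algo_dt = 12.3456
        shortest_path = ["a", "b", "c"]

    class Stat:
        def __init__(self, label: str, getter: Callable[[], object]):
            self.label = label
            self.getter = getter

        def display(self) -> None:
            print(f"{self.label}{self.getter()}")

    pathfinder = PathfinderStub()
    stats = [
        Stat("Process time (ms): ", lambda: round(pathfinder.algo_dt, 2)),
        Stat("Path length: ", lambda: len(pathfinder.shortest_path)),
    ]
    for stat in stats:
        stat.display()   # Process time (ms): 12.35 / Path length: 3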
init_gui(pathfinder_obj, grid_obj) initialises the main Gui for the visualizer module; most dependency issues are fixed by injecting the necessary objects as arguments (the Pathfinder class is not typed, to avoid an import). First it defines the functions for all buttons that will be added to the Gui, second it builds a dict of all the objects to be added to the Gui as "attribute": object, and last it creates the Gui from that dict with the pathfinder and the grid added as kwargs, as shown in the layout below.

Button functions for the particular cases:

- random_walls(self: GridButton) deactivates the button and turns roughly 10% of the nodes into walls: every node with randrange(11) == 0 that is neither grid_obj.start nor grid_obj.end gets node.status |= Node.WALL, and node.get_fill() is appended to cst.dirty_fills. (For GridButton, the self parameter is always injected by is_clicked.)
- set_algo(self: AlgoButton) sets the pathfinder.algo attribute to the algorithm associated with the AlgoButton, deactivates the algorithm dropdown and removes the algo_gui child with remove_from_root(root=main_gui_handler, child=algo_gui).
- generate() calls the grid object's generate method, injecting the n_wide and n_high values from the main Gui's grid_n_wide_button and grid_n_high_button TextInputButtons.
- play_pause(arg) toggles the pathfinder.running attribute on every press when the run conditions are met, and calls pathfinder.init_search when pathfinder.search_is_init is False. Its helper check_conditions() verifies that an algorithm is defined and that the grid has a start and an end node, posting a cst.NO_START or cst.NO_END event (which spawns a popup on the main Gui) otherwise; disable_buttons() disables every object that is not a StateButton or the exit button while the search runs, and handle_display() is called early when moves are not displayed or the run interval is -1, so the disabled buttons are shown before the pathfinder starts looping.
- reset(partial=False) stops the pathfinder and resets all the necessary attributes, also resetting the grid: it clears the start and end nodes when partial is False, zeroes the timers, frontier, queue, to-be-removed list and shortest path, strips the WALL/END/START and SYM_RECT/BORDER/VISITED/PATH status bits from the nodes (keeping walls, start and end when partial is True, i.e. when only the search is reset), and re-enables and redraws the buttons. The status-mask idea is sketched after this list.
- disp_moves_func(arg), diago_func(arg) and apply_rsr_func(arg) are the Checkbox callbacks: they switch pathfinder.display, pathfinder.diago and pathfinder.apply_rsr respectively (for Checkboxes, arg is the checkbox's is_activated state, always injected by is_clicked). disp_moves_func additionally disables or re-enables the wait_time_button and run_interval_button and redraws them to show the change.
- save() dumps {"start": grid_obj.start, "end": grid_obj.end, "grid": grid_obj.all_nodes} as a pickle file into the Grids folder (or another chosen location); load_grid() opens a file dialog (tkinter.Tk().withdraw(); filedialog.askopenfilename(initialdir=grid_path)), loads the pickle, then update_values() copies the saved nodes, start and end back into the grid and refreshes the grid_n_wide/grid_n_high buttons, and scale_and_draw() rescales every node's width, height, position and rect to fit the current screen size (which may push the grid slightly out of its borders) before redrawing the grid.
- exit_func() posts pg.event.Event(pg.QUIT) to exit the program.
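The bit-mask clearing done by reset() can be shown with an IntFlag stand-in; NodeStatus and its values below are assumptions for illustration, not the project's Node constants.

    from enum import IntFlag

    class NodeStatus(IntFlag):
        WALL = 1
        START = 2
        END = 4
        SYM_RECT = 8
        BORDER = 16
        VISITED = 32
        PATH = 64

    status = NodeStatus.WALL | NodeStatus.VISITED | NodeStatus.PATH

    # A partial reset keeps walls/start/end but clears the search-related flags:
    status &= ~(NodeStatus.SYM_RECT | NodeStatus.BORDER | NodeStatus.VISITED | NodeStatus.PATH)
    print(status)        # NodeStatus.WALL

    # A full reset clears the placement flags as well:
    status &= ~(NodeStatus.WALL | NodeStatus.END | NodeStatus.START)
    print(int(status))   # 0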
# Also I wanted to make", "time (ms): \", cst.BLACK, (cfg.stats_background_rect.x + 15, cfg.stats_background_rect.y + 15), get_algo_dt), neighbor_prep_time=Stat(\"Neighbors Preprocess", "= Gui.text_input button.display() def handle_clicks(self, mouse_pos: Tuple[int, int], root: 'Gui' = None) ->", "child=priority_obj) except AttributeError: pass return used click = pg.Rect(mouse_pos, (1, 1)) click_used =", "Gui's objects require priority on clicks (currently only for DropDownButton and any child", "typed to avoid import) :param grid_obj: Grid object to link to the Gui", "increment: int = 200, **kwargs: Stat) -> None: \"\"\" Creates a Singleton Stats", "Initialise the StatsHandler object, with injected dependency to the pathfinder to access stats", "algorithm is defined, the grid has a starting node and an ending node.", "walls, start and end nodes as is :param partial: True if resetting search,", "-1, \"max\": 9999, \"default\": 0, \"value\": 0}, (15, main_gui[\"display_moves_button\"].rect.bottom + 10), 40, \"Run:", "None: \"\"\" Load a grid object from the Grids folder (or other), update", "representing the popup window \"\"\" def ok_func(root: Gui, child: Gui) -> None: \"\"\"", "if self.timer(): self.display() def init_stats(pathfinder: Any) -> StatsHandler: \"\"\" Initialise the StatsHandler object,", "pg.MOUSEBUTTONDOWN: self.handle_clicks(pg.mouse.get_pos()) elif event.type == pg.KEYDOWN: self.handle_input(event) for user_event, func, *args in additional:", "j in group: if i.__class__ is pg.Surface: cfg.window.blit(i, j) else: cfg.window.fill(i, j) group.clear()", "it elsewhere (pop-ups) main_gui[\"button_background_rect\"] = Background(cst.LIGHT_GREY, cfg.button_background_rect) # grid placement buttons main_gui[\"start_node_button\"] =", "size, draw the grid :return: None \"\"\" # scale grid to screen, as", "but is forbidden for child Gui :param priority_obj: The object with priority :return:", "popup necessary kwargs: - 'root': Parent/root Gui - 'child': Child Gui to remove", "terminated used = priority_obj.handle_clicks(mouse_pos, root=self) try: if not used and not priority_obj.external: used", "True, disp_moves_func) main_gui[\"run_interval_button\"] = TextInputButton({\"min\": -1, \"max\": 9999, \"default\": 0, \"value\": 0}, (15,", "or handle input methods. TYPING: *additional: (event.type, function(event, *args) -> None , *args),", "It's a bit ugly doing it like this but it's the only way", "the window then clear channel, fills are made before blits. All the program's", "button.is_activated: if event.key == pg.K_BACKSPACE: if len(Gui.text_input) <= 1: Gui.text_input = \"\" else:", "'child': Child Gui to remove from Parent :param attributes: Used to specify \"grid\"", "def handle_priority(priority_obj: Union[DropDownButton, Gui]) -> bool: \"\"\" If priority object was found look", "parent Gui dependency as root, to allow the child Gui to remove itself", "buttons, displaying stats, popups and setting start/end nodes It also creates all buttons", "10), 40, \"Wait: \") main_gui[\"reset_button\"] = StateButton((15, main_gui[\"run_interval_button\"].rect.bottom + 30), \"Reset Grid\", reset)", "if click was used, else False \"\"\" def check_priority() -> Union[DropDownButton, Gui]: \"\"\"", "2 * text_surf.get_width() bg_height = 4 * text_surf.get_height() text_obj = (text_surf, ((bg_width -", "+ 300, cfg.stats_background_rect.y + 15), get_fps), path_length=Stat(\"Path length: \", cst.BLACK, (cfg.stats_background_rect.x + 300,", "method will be called if it has one. 
    def handle_events(self, *additional: Any) -> None:
        """
        Handle click and keyboard input events by redistributing to the handle click
        or handle input methods.
        TYPING: *additional: (event.type, function(event, *args) -> None, *args), ...
        :param additional: Allows entering specific (event.type, function, (*args)) tuples
                           to handle other events. The function will receive parameters
                           (event, args). (I couldn't get the typing right...)
        :return: None
        """
        for event in self.events:
            if event.type == pg.MOUSEBUTTONDOWN:
                self.handle_clicks(pg.mouse.get_pos())
            elif event.type == pg.KEYDOWN:
                self.handle_input(event)
            for user_event, func, *args in additional:
                if event.type == user_event:
                    func(event, *args)
        self.events.clear()
    def handle_input(self, event: pg.event.Event):
        """
        Process Keyboard user input; the entered text is accumulated in Gui.text_input
        and mirrored by the activated TextInputButton. Once the Enter key is pressed,
        its confirm_input(self.text_input) method will be called with the injected input.
        :param event: must be of type pg.KEYDOWN event
        :return: None
        """
        for button in self.objects:
            if button.__class__ is TextInputButton and button.is_activated:
                if event.key == pg.K_BACKSPACE:
                    if len(Gui.text_input) <= 1:
                        Gui.text_input = ""
                    else:
                        Gui.text_input = Gui.text_input[:-1]
                    button.dict["value"] = Gui.text_input
                elif event.key == pg.K_RETURN:
                    button.confirm_input(Gui.text_input)
                    Gui.text_input = ""
                else:
                    Gui.text_input += event.unicode
                    button.dict["value"] = Gui.text_input
                button.display()
    def handle_clicks(self, mouse_pos: Tuple[int, int], root: 'Gui' = None) -> bool:
        """
        Handle clicking events; will recursively pass down click events to a child Gui
        if one is in its objects (LIMITED TO ONE CHILD GUI). If an object is clicked,
        the object's is_clicked() method will be called if it has one.
        :param mouse_pos: Coordinates of the cursor
        :param root: parent Gui, allows the child to remove itself from the parent's objects
        :return: True if click was used, else False
        """
        def check_priority() -> Union[DropDownButton, Gui]:
            """
            Check if any of the Gui's objects require priority on clicks (currently only
            for DropDownButton and any child Gui that might be spawned during the program)
            :return: object with priority
            """
            priority_to = None
            for obj in self.objects:
                if isinstance(obj, Gui):
                    priority_to = obj
            return priority_to

        def handle_priority(priority_obj: Union[DropDownButton, Gui]) -> bool:
            """
            If a priority object was found, look if any clicks affect it; clicking outside
            of a DropDownButton's rect is allowed and clicks will be registered, but is
            forbidden for a child Gui
            :param priority_obj: The object with priority
            :return: True if click was used, else False
            """
            used = False
            if isinstance(priority_obj, Gui):
                # Inject parent Gui dependency as root, to allow the child Gui to remove itself
                # from the parent's objects when it is terminated
                used = priority_obj.handle_clicks(mouse_pos, root=self)
                try:
                    if not used and not priority_obj.external:
                        used = True
                    if not used and priority_obj.ext_close:
                        priority_obj.src_butt.is_activated = False
                        remove_from_root(root=self, child=priority_obj)
                except AttributeError:
                    pass
            return used

        click = pg.Rect(mouse_pos, (1, 1))
        click_used = False
        prio = check_priority()
        if prio:
            # Clicking outside the child-most Gui is forbidden
            click_used = handle_priority(prio)
        if not click_used:
            for button in self.objects:
                try:
                    if not button.is_disabled:
                        if click.colliderect(button.rect):
                            click_used = True
                            button.is_clicked(gui=self, root=root)
                        elif button.is_activated and button.__class__ is TextInputButton:
                            button.confirm_input(self.text_input)
                except AttributeError:
                    pass
        return click_used


# TODO: a lot of setter functions could be shifted into a get_data(gui) method by the
#  pathfinder to unclutter this module and remove dependencies
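# A minimal sketch of how a caller is expected to drive a Gui per frame; `gui` is the
# object returned by init_gui() below, and the pygame event pump is assumed to belong
# to the caller (this module never calls pg.event.get() itself).
def _example_frame(gui: Gui) -> None:
    """Illustrative only: feed pygame's events to the Gui, dispatch them, then paint."""
    gui.events.extend(pg.event.get())  # Gui.handle_events() reads from .events
    gui.handle_events()                # routes to handle_clicks() / handle_input()
    handle_display()                   # flush the fill/blit channels (defined below)
    pg.display.flip()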
"main_gui_handler.run_interval_button.is_disabled = \\ main_gui_handler.run_interval_button.is_disabled = \\ not main_gui_handler.display_moves_button.is_activated # TODO: try resetting the", "main_gui_handler.display_moves_button.is_activated # TODO: try resetting the focus to pygame def save() -> None:", "try resetting the focus to pygame def save() -> None: \"\"\" Save the", "= arg def apply_rsr_func(arg: bool) -> None: \"\"\" Function for the apply_rsr Checkbox.", "+ 30), \"Exit\", exit_func) main_gui_handler = Gui(main_gui, pathfinder=pathfinder_obj, grid=grid_obj) return main_gui_handler def handle_display()", "if not exists(grid_path): mkdir(grid_path) def remove_from_root(*attributes, **kwargs) -> None: \"\"\" Removes the popup_gui", "previous ones Last we create the Gui from the dict, with pathfinder and", "Buttons (see classes.py for prefabricated classes to use) :param kwargs: add attributes to", "has a starting node and an ending node. If no end or start", "exists(grid_path): mkdir(grid_path) def remove_from_root(*attributes, **kwargs) -> None: \"\"\" Removes the popup_gui from the", "grid_obj.display() def update_values(save_object: dict) -> None: \"\"\" Updates the attributes of the grid", "Dict[str, Any], **kwargs: Any) -> None: \"\"\" Creates a gui window or screen,", "other) :return: None \"\"\" tkinter.Tk().withdraw() direct = filedialog.asksaveasfilename(initialdir=grid_path, defaultextension=\".pickle\") if direct: save_object =", "StatsHandler: \"\"\" Initialise the StatsHandler object, with injected dependency to the pathfinder to", "as kwargs. :param pathfinder_obj: Pathfinder object to link to the Gui (class is", "the object's is_clicked() method will be called if it has one. :param mouse_pos:", "-> float: \"\"\" Get the lenght of the shortest path found by the", "= False pathfinder_obj.path_found = False pathfinder_obj.frontier = [] pathfinder_obj.queue.clear() pathfinder_obj.to_be_removed = [] pathfinder_obj.shortest_path", "Gui. Disable the grid from receiving input and redraw it to cover the", "+ 10), False, apply_rsr_func) main_gui[\"display_moves_button\"] = Checkbox(\"Display moves\", (15, main_gui[\"apply_rsr_button\"].rect.bottom + 10), True,", "is not automatic) :param increment: Delay between updates of the stats in ms", "once the Enter key is pressed, it's confirm_input(self.text_input) method will be called with", "if len(Gui.text_input) <= 1: Gui.text_input = \"\" else: Gui.text_input = Gui.text_input[:-1] button.dict[\"value\"] =", "pathfinder\"\"\" return round(pathfinder.rsr_prep_dt, 2) def get_path_len() -> float: \"\"\" Get the lenght of", "None: \"\"\" Function for the display moves Checkbox. Switches the bool of pathfinder.display_steps", "\"Place Start\") main_gui[\"end_node_button\"] = GridButton((main_gui[\"start_node_button\"].rect.right + 5, main_gui[\"start_node_button\"].rect.top), \"Place End\") main_gui[\"draw_walls_button\"] = GridButton((15,", "increment def display(self) -> None: \"\"\" Display all Stat object in self.stats. 
:return:", "object with priority :return: True if click was used, else False \"\"\" used", "the time taken for preprocessing the grid's nodes' neighbors from the pathfinder\"\"\" return", "the main Gui for the visualizer module, most dependency issues are fixed by", "cst from classes import * folder_path = getcwd() grid_path = join(folder_path, \"Grids\") if", "main_gui_handler.run_interval_button.dict[\"value\"] == -1: # update display to show disabled buttons before pathfinder starts", "\"\"\" Get algorithm process time from the pathfinder or the time since it", "main_gui[\"save_grid_button\"].rect.top), \"Load Grid\", load_grid) main_gui[\"exit_button\"] = SystemButton((15, main_gui[\"load_grid_button\"].rect.bottom + 30), \"Exit\", exit_func) main_gui_handler", ":param attributes: Used to specify \"grid\" if it needs to be redrawn :param", "\"\"\" root.grid.disabled = pg.time.get_ticks() + 100 remove_from_root(\"grid\", root=root, child=child) text_surf = cst.big_text_font.render(announcement, True,", "the focus to pygame def save() -> None: \"\"\" Save the Grid object", "Disable Buttons that cannot be used during pathfinding :return: None \"\"\" for obj", "- nodes_width for column in grid_obj.all_nodes: position_x += nodes_width for node in column:", "float: \"\"\" Get the time taken for preprocessing Rectangular Symmetry Reduction from the", "we create the Gui from the dict, with pathfinder and grid added as", "met. Call pathfinder.init_search methode if pathfinder.search_is_init is False. Disable Buttons that cannot be", "priority_to = obj return priority_to def handle_priority(priority_obj: Union[DropDownButton, Gui]) -> bool: \"\"\" If", "be called with the injected input. :param event: must be of type pg.KEYDOWN", "moves Checkbox. Switches the bool of pathfinder.display_steps attribute. Disables the run interval and", "grid and show all changes :return: None \"\"\" def scale_and_draw() -> None: \"\"\"", "if obj.__class__ is Stat] self.background = background self.chrono = 0 self.increment = increment", "and grid_n_high TextInputButtons' values :return: None \"\"\" grid_obj.generate(main_gui_handler.grid_n_wide_button.dict[\"value\"], main_gui_handler.grid_n_high_button.dict[\"value\"]) def play_pause(arg: bool =", "clicks outside the Gui's objects - ext_close=True Removes Gui from parent Gui on", "text_input = \"\" def __init__(self, dict_object: Dict[str, Any], **kwargs: Any) -> None: \"\"\"", "pathfinder.search_is_init is False. Disable Buttons that cannot be used during pathfinding :param arg:", "# defining stats getters def get_algo_dt() -> float: \"\"\" Get algorithm process time", "it needs to be redrawn :param kwargs: See the necessary kwargs above :return:", "apply_rsr_func) main_gui[\"display_moves_button\"] = Checkbox(\"Display moves\", (15, main_gui[\"apply_rsr_button\"].rect.bottom + 10), True, disp_moves_func) main_gui[\"run_interval_button\"] =", "object's is_clicked() method will be called if it has one. :param mouse_pos: Coordinates", "Gui is forbidden if not click_used: for button in self.objects: try: if not", "as \"attribute\": object. 
The dict is defined one line at a time because", "main_gui_handler.grid_n_high_button.dict[\"value\"] start_height = 25 # Substracting the first because it will be incremented", "grid_obj.all_nodes: position_x += nodes_width for node in column: position_y += nodes_height node.height =", "stats in ms :param kwargs: add stat objects as attributes of the stat", "Enter key is pressed, it's confirm_input(self.text_input) method will be called with the injected", "main_gui_handler.dropdown_algo.is_activated = False remove_from_root(root=main_gui_handler, child=algo_gui) def generate() -> None: \"\"\" Calls the generate", "use) :param kwargs: add attributes to the gui, used for dependency injection. \"\"\"", "def play_pause(arg: bool = None) -> None: \"\"\" Switches the pathfinder.running attribute on", "Preprocess (ms): \", cst.BLACK, (cfg.stats_background_rect.x + 15, cfg.stats_background_rect.y + 35), get_neighbor_dt), rsr_prep_time=Stat(\"RSR Preprocess", "know to keep reference to the previous entry. # Also I wanted to", "1, \"value\": 1}, (15, main_gui[\"grid_n_high_button\"].rect.bottom + 10), 30, \"Brush size: \") main_gui[\"save_grid_button\"] =", "one line at a time because all Button's position depend on the previous", "algo_gui = Gui({f\"{button.algo}\": button for button in algo_buttons}, external=True, ext_close=True) main_gui[\"dropdown_algo\"] = DropDownButton((15,", "is clicked, the object's is_clicked() method will be called if it has one.", "stats handler, it's the only thing that needs to be called once it", "StateButton((15, main_gui[\"reset_search_button\"].rect.bottom + 10), \"Play/Pause\", play_pause) main_gui[\"grid_n_wide_button\"] = TextInputButton({\"min\": 3, \"max\": cfg.window.get_width() -", "\"\"\" Creates a Singleton Stats handler for displaying Stat objects on a Background", "100 remove_from_root(\"grid\", root=root, child=child) text_surf = cst.big_text_font.render(announcement, True, cst.RED) bg_width = 2 *", "main_gui[\"brush_size_button\"].rect.bottom + 30), \"Save Grid\", save) main_gui[\"load_grid_button\"] = SystemButton((main_gui[\"save_grid_button\"].rect.right + 5, main_gui[\"save_grid_button\"].rect.top), \"Load", "at a time because all Button's position depend on the previous ones Last", "file in the Grids folder (or other) :return: None \"\"\" tkinter.Tk().withdraw() direct =", "is forbidden for child Gui :param priority_obj: The object with priority :return: True", "with pathfinder and grid added as kwargs. :param pathfinder_obj: Pathfinder object to link", "= False for column in grid_obj.all_nodes: for node in column: if randrange(11) ==", "always injected in is_clicked :return: None \"\"\" pathfinder_obj.apply_rsr = arg def set_algo(self: AlgoButton)", "cannot be used during pathfinding :param arg: Not needed, but is included for", "True button.is_clicked(gui=self, root=root) elif button.is_activated and button.__class__ is TextInputButton: button.confirm_input(self.text_input) except AttributeError: pass", "ext_close=True) main_gui[\"dropdown_algo\"] = DropDownButton((15, main_gui[\"random_walls_button\"].rect.bottom + 30), \"Algo: \", algo_buttons, child_gui=algo_gui) main_gui[\"diago_button\"] =", "priority :return: True if click was used, else False \"\"\" used = False", "for special cases there is an early and a late channel. 
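    # How the Checkbox callbacks above receive `arg` (inferred from the ":param arg: ...
    # always injected in is_clicked" docstrings, not from the Checkbox source): clicking
    # "Diagonal moves" ends up as roughly
    #
    #     diago_button.is_clicked(...)               # inside Gui.handle_clicks
    #     -> diago_func(diago_button.is_activated)   # Checkbox injects its own state
    #     -> pathfinder_obj.diago = True / False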
    def play_pause(arg: bool = None) -> None:
        """
        Switches the pathfinder.running attribute on and off on every press if the run
        conditions are met. Calls the pathfinder.init_search method if
        pathfinder.search_is_init is False. Disables Buttons that cannot be used during
        pathfinding.
        :param arg: Not needed, but is included for the functions of other StateButtons
        :return: None
        """
        def disable_buttons() -> None:
            """
            Disable Buttons that cannot be used during pathfinding
            :return: None
            """
            for obj in main_gui_handler.objects:
                # assumption: Backgrounds have no is_disabled flag, and StateButtons
                # (play/pause, resets) must stay usable while the search runs
                if obj.__class__ is not StateButton and obj.__class__ is not Background:
                    obj.is_disabled = True
                    obj.display()

        def check_conditions() -> bool:
            """
            Checks that an algorithm is defined, the grid has a starting node and an
            ending node. If no end or start node is defined, adds a popup Gui to the main_gui
            :return: True if pathfinder is ready to run
            """
            if pathfinder_obj.algo:
                if grid_obj.start:
                    if grid_obj.end:
                        return True
                    else:
                        pg.event.post(pg.event.Event(cst.NO_END, announcement="No end Node!"))
                else:
                    pg.event.post(pg.event.Event(cst.NO_START, announcement="No start Node!"))
            return False

        if check_conditions():
            pathfinder_obj.running = not pathfinder_obj.running  # toggle on every press
            if not pathfinder_obj.search_is_init:
                disable_buttons()
                if not pathfinder_obj.display or main_gui_handler.run_interval_button.dict["value"] == -1:
                    # update display to show disabled buttons before pathfinder starts looping
                    handle_display()
                    pg.display.flip()
                pathfinder_obj.init_search()

    def reset(partial: bool = False) -> None:
        """
        Stops the pathfinder and resets all necessary attributes; also resets the grid.
        If partial, leaves walls, start and end nodes as is
        :param partial: True if resetting search, False if resetting grid
        :return: None
        """
        pathfinder_obj.running = False
        if not partial:
            if grid_obj.start is not None:
                temp = grid_obj.start
                grid_obj.start = None
                temp.is_start = False
                cst.dirty_fills.append(temp.get_fill())
            if grid_obj.end is not None:
                temp = grid_obj.end
                grid_obj.end = None
                temp.is_end = False
                cst.dirty_fills.append(temp.get_fill())
        pathfinder_obj.search_is_init = False
        pathfinder_obj.dijkstra_cost_so_far = 0
        pathfinder_obj.path_found = False
        pathfinder_obj.frontier = []
        pathfinder_obj.queue.clear()
        pathfinder_obj.to_be_removed = []
        pathfinder_obj.shortest_path = []
        pathfinder_obj.run_timer = 0
        pathfinder_obj.start_time = 0
        pathfinder_obj.end_time = 0
        pathfinder_obj.neighbors_prep_dt = 0
        pathfinder_obj.rsr_prep_dt = 0
        for column in grid_obj.all_nodes:
            for node in column:
                node.came_from = None
                if node.update_color() is not cst.BLACK:
                    if not partial:
                        node.status &= ~(Node.WALL | Node.END | Node.START)
                    node.status &= ~(Node.SYM_RECT | Node.BORDER | Node.VISITED | Node.PATH)
                    cst.dirty_fills.append(node.get_fill())
        for obj in main_gui_handler.objects:
            try:
                obj.is_disabled = False
                obj.display()
            except AttributeError:  # (Backgrounds)
                continue
        # keep the wait/run-interval buttons in sync with the "Display moves" checkbox
        main_gui_handler.wait_time_button.is_disabled = \
            main_gui_handler.run_interval_button.is_disabled = \
            not main_gui_handler.display_moves_button.is_activated
        # TODO: try resetting the focus to pygame
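    # Sketch of the missing-node flow: check_conditions() posts a custom event
    # (cst.NO_START / cst.NO_END) carrying an `announcement` string; the caller can turn
    # it into a popup through the *additional hook of Gui.handle_events, e.g.
    #
    #     main_gui_handler.handle_events(
    #         (cst.NO_END, lambda e: main_gui_handler.objects.append(pop_up(e.announcement))),
    #     )
    #
    # The exact wiring lives outside this module and is an assumption here.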
    def save() -> None:
        """
        Save the Grid object as a Pickle file in the Grids folder (or other)
        :return: None
        """
        tkinter.Tk().withdraw()
        direct = filedialog.asksaveasfilename(initialdir=grid_path, defaultextension=".pickle")
        if direct:
            save_object = {"start": grid_obj.start, "end": grid_obj.end, "grid": grid_obj.all_nodes}
            with open(direct, "wb") as file:
                dump(save_object, file)

    def load_grid() -> None:
        """
        Load a grid object from the Grids folder (or other), update values,
        scale the grid and show all changes
        :return: None
        """
        def scale_and_draw() -> None:
            """
            Scale the grid object to fit the current screen size, draw the grid
            :return: None
            """
            # scale grid to screen, as well as possible, might make grid go out of borders
            nodes_width = grid_obj.width / main_gui_handler.grid_n_wide_button.dict["value"]
            nodes_height = grid_obj.height / main_gui_handler.grid_n_high_button.dict["value"]
            start_height = 25
            # Subtracting the first step because it will be incremented during the loop
            position_y = start_height - nodes_height
            position_x = cfg.button_background_rect.width - nodes_width
            for column in grid_obj.all_nodes:
                position_x += nodes_width
                for node in column:
                    position_y += nodes_height
                    node.height = nodes_height
                    node.width = nodes_width
                    node.position = (position_x, position_y)
                    node.rect = pg.rect.Rect(node.position, (node.width, node.height))
                position_y = start_height - nodes_height
            grid_obj.display()

        def update_values(save_object: dict) -> None:
            """
            Updates the attributes of the grid object and the values of the grid_n_wide
            and grid_n_high buttons, and displays the changes
            :param save_object: save object loaded from pickle file
            :return: None
            """
            grid_obj.all_nodes = save_object["grid"]
            grid_obj.start = save_object["start"]
            grid_obj.end = save_object["end"]
            main_gui_handler.grid_n_wide_button.dict["value"] = len(grid_obj.all_nodes)
            main_gui_handler.grid_n_high_button.dict["value"] = len(grid_obj.all_nodes[0])
            main_gui_handler.grid_n_wide_button.display()
            main_gui_handler.grid_n_high_button.display()

        tkinter.Tk().withdraw()
        direct = filedialog.askopenfilename(initialdir=grid_path)
        if direct:
            with open(direct, "rb") as file:
                save_object_ = load(file)
            update_values(save_object_)
            scale_and_draw()

    def exit_func() -> None:
        """
        Post a pg.QUIT event to terminate the program cleanly
        :return: None
        """
        pg.event.post(pg.event.Event(pg.QUIT))
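    # The pickle written by save() is a plain dict, so a saved grid can be inspected
    # offline with just the standard library (classes.py must be importable, since the
    # nodes are class instances). The file name below is hypothetical:
    #
    #     from pickle import load
    #     with open("Grids/my_grid.pickle", "rb") as f:
    #         save_object = load(f)
    #     print(save_object.keys())    # dict_keys(['start', 'end', 'grid'])
    #
    # As with any pickle, only load files you created yourself.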
    # creating GUI #####################################################################################################
    main_gui: Dict[str, Union[pg.Rect, GridButton, AlgoButton, Checkbox, DropDownButton,
                              TextInputButton, StateButton, SystemButton, Background]] = dict()
    # It's a bit ugly doing it like this, but it's the only way I know to keep a
    # reference to the previous entry.
    # Also I wanted to make a flexible GUI object to be able to use it elsewhere (pop-ups)
    main_gui["button_background_rect"] = Background(cst.LIGHT_GREY, cfg.button_background_rect)
    # grid placement buttons
    main_gui["start_node_button"] = GridButton((15, 25), "Place Start")
    main_gui["end_node_button"] = GridButton(
        (main_gui["start_node_button"].rect.right + 5, main_gui["start_node_button"].rect.top), "Place End")
    main_gui["draw_walls_button"] = GridButton((15, main_gui["start_node_button"].rect.bottom + 10), "Draw walls")
    main_gui["erase_walls_button"] = GridButton(
        (main_gui["draw_walls_button"].rect.right + 5, main_gui["draw_walls_button"].rect.top), "Erase walls")
    main_gui["random_walls_button"] = GridButton(
        (15, main_gui["draw_walls_button"].rect.bottom + 10), "Random walls", func=random_walls)
    # algo buttons
    algo_buttons = [AlgoButton((0, 0), "Flood Fill", "bfs", active_color=cst.BLACK, rounded=False, func=set_algo),
                    AlgoButton((0, 0), "A*", "astar", active_color=cst.BLACK, rounded=False, func=set_algo),
                    AlgoButton((0, 0), "Dijkstra", "dijkstra", active_color=cst.BLACK, rounded=False, func=set_algo)]
    algo_buttons[0].is_activated = True
    algo_gui = Gui({f"{button.algo}": button for button in algo_buttons}, external=True, ext_close=True)
    main_gui["dropdown_algo"] = DropDownButton(
        (15, main_gui["random_walls_button"].rect.bottom + 30), "Algo: ", algo_buttons, child_gui=algo_gui)
    main_gui["diago_button"] = Checkbox("Diagonal moves", (15, main_gui["dropdown_algo"].rect.bottom + 10),
                                        False, diago_func)
    main_gui["apply_rsr_button"] = Checkbox("Apply RSR", (15, main_gui["diago_button"].rect.bottom + 10),
                                            False, apply_rsr_func)
    main_gui["display_moves_button"] = Checkbox("Display moves", (15, main_gui["apply_rsr_button"].rect.bottom + 10),
                                                True, disp_moves_func)
    main_gui["run_interval_button"] = TextInputButton({"min": -1, "max": 9999, "default": 0, "value": 0},
                                                      (15, main_gui["display_moves_button"].rect.bottom + 10),
                                                      40, "Run: ")
    main_gui["wait_time_button"] = TextInputButton({"min": 0, "max": 9999, "default": 0, "value": 0},
                                                   (main_gui["run_interval_button"].rect.right + 5,
                                                    main_gui["display_moves_button"].rect.bottom + 10),
                                                   40, "Wait: ")
    main_gui["reset_button"] = StateButton((15, main_gui["run_interval_button"].rect.bottom + 30),
                                           "Reset Grid", reset)
    main_gui["reset_search_button"] = StateButton(
        (main_gui["reset_button"].rect.right + 5, main_gui["reset_button"].rect.top), "Reset Search", reset, True)
    main_gui["play_pause_button"] = StateButton((15, main_gui["reset_search_button"].rect.bottom + 10),
                                                "Play/Pause", play_pause)
    main_gui["grid_n_wide_button"] = TextInputButton(
        {"min": 3, "max": cfg.window.get_width() - 205 - 25, "default": 100, "value": 100},
        (15, main_gui["play_pause_button"].rect.bottom + 30), 40, "Nodes in width: ", func=generate)
    main_gui["grid_n_high_button"] = TextInputButton(
        {"min": 3, "max": cfg.window.get_height() - 125 - 25, "default": 100, "value": 100},
        (15, main_gui["grid_n_wide_button"].rect.bottom + 10), 40, "Nodes in height: ", func=generate)
    main_gui["brush_size_button"] = TextInputButton({"min": 1, "max": 200, "default": 1, "value": 1},
                                                    (15, main_gui["grid_n_high_button"].rect.bottom + 10),
                                                    30, "Brush size: ")
    main_gui["save_grid_button"] = SystemButton((15, main_gui["brush_size_button"].rect.bottom + 30),
                                                "Save Grid", save)
    main_gui["load_grid_button"] = SystemButton(
        (main_gui["save_grid_button"].rect.right + 5, main_gui["save_grid_button"].rect.top), "Load Grid", load_grid)
    main_gui["exit_button"] = SystemButton((15, main_gui["load_grid_button"].rect.bottom + 30), "Exit", exit_func)

    main_gui_handler = Gui(main_gui, pathfinder=pathfinder_obj, grid=grid_obj)
    return main_gui_handler
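# Each main_gui entry above positions itself off the previous entry's rect, which is why
# the dict is built one line at a time. A new button would follow the same pattern
# (hypothetical example), inserted before the Gui(main_gui, ...) call:
#
#     main_gui["clear_path_button"] = SystemButton(
#         (15, main_gui["exit_button"].rect.bottom + 10), "Clear Path", some_func)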
def handle_display() -> None:
    """
    Does all the fills and blits to the window, then clears each channel; fills are made
    before blits. All the program's fill and blit orders are appended to one of the lists
    in cst.to_display (see constants.py module); for special cases there is an early and
    a late channel.
    :return: None
    """
    for group in cst.to_display:
        for i, j in group:
            if i.__class__ is pg.Surface:
                cfg.window.blit(i, j)
            else:
                cfg.window.fill(i, j)
        group.clear()
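# Sketch of how producers feed handle_display(): each channel holds (source, dest) pairs,
# where a pg.Surface source is blitted and anything else is treated as a fill color.
# `dirty_fills` appears throughout this module; the blit-channel name is an assumption:
#
#     cst.dirty_fills.append((cst.RED, node.rect))   # fill order: (color, rect)
#     cst.dirty_blits.append((text_surf, (10, 10)))  # blit order: (surface, dest), assumed name
#
# Which lists sit in the early or late channel is defined in constants.py.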
def pop_up(announcement: str) -> Gui:
    """
    Creates a Pop-up window Gui with a single OK button to dismiss the message and
    remove the Gui from its parent Gui. Use as follows: from the main Gui, append the
    returned Gui to the main Gui's objects so it receives click priority.
    :param announcement: message to be displayed on the popup window
    :return: A Gui object representing the popup window
    """
    def ok_func(root: Gui, child: Gui) -> None:
        """
        Removes the popup_gui from the root/parent Gui. Disables the grid from receiving
        input for a moment and redraws it to cover the popup
        :param root: Parent/root Gui
        :param child: Child Gui to remove from parent
        :return: None
        """
        root.grid.disabled = pg.time.get_ticks() + 100
        remove_from_root("grid", root=root, child=child)

    text_surf = cst.big_text_font.render(announcement, True, cst.RED)
    bg_width = 2 * text_surf.get_width()
    bg_height = 4 * text_surf.get_height()
    text_obj = (text_surf, ((bg_width - text_surf.get_width()) / 2,
                            (bg_height - text_surf.get_height()) / 3))
    dimension_butt = Button((0, 0), "OK")  # throwaway button, only used to measure the OK rect
    ok_button = OkButton(((cfg.window.get_width() - dimension_butt.rect.w) / 2,
                          (cfg.window.get_height() - dimension_butt.rect.h) / 2 + bg_height / 4),
                         "OK", func=ok_func)
    background = Background(cst.DARK_GREY,
                            pg.Rect(((cfg.window.get_width() - bg_width) / 2,
                                     (cfg.window.get_height() - bg_height) / 2),
                                    (bg_width, bg_height)),
                            text_obj)
    popup = Gui({"popup_bg": background, "ok_butt": ok_button})
    popup.draw_all()
    return popup
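# Usage sketch for pop_up (placeholder names): the returned Gui is appended to the
# parent's objects so handle_clicks() routes clicks to it with priority, e.g.
#
#     popup = pop_up("No end Node!")
#     main_gui_handler.objects.append(popup)
#
# Pressing OK then calls ok_func(root, child), which disables the grid for ~100 ms
# (pg.time.get_ticks() + 100) and removes the popup via remove_from_root.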
:param pathfinder_obj: Pathfinder object to link to the Gui (class", "a popup Gui to the main_gui :return: True if pathfinder is ready to", "\"\"\" Set the pathfinder.algo attribute to the algorithm associated with the AlgoButton :param", "to handle clicks on Buttons (see classes.py for prefabricated classes to use) :param", "define the getter functions for the Stat objects Then Instantiate the Stat object", "TextInputButton({\"min\": 0, \"max\": 9999, \"default\": 0, \"value\": 0}, (main_gui[\"run_interval_button\"].rect.right + 5, main_gui[\"display_moves_button\"].rect.bottom +", "injected in is_clicked :return: None \"\"\" self.is_activated = False for column in grid_obj.all_nodes:", ":return: True if it's time to display \"\"\" if pg.time.get_ticks() >= self.chrono: self.chrono", "return priority_to def handle_priority(priority_obj: Union[DropDownButton, Gui]) -> bool: \"\"\" If priority object was", "= Gui.text_input elif event.key == pg.K_RETURN: button.confirm_input(Gui.text_input) Gui.text_input = \"\" else: Gui.text_input +=", "values, scale the grid and show all changes :return: None \"\"\" def scale_and_draw()", "for column in grid_obj.all_nodes: for node in column: if randrange(11) == 0: if", "cst.RED) bg_width = 2 * text_surf.get_width() bg_height = 4 * text_surf.get_height() text_obj =", "= handle_priority(prio) # Clicking outside the child-most Gui is forbidden if not click_used:", "== pg.K_BACKSPACE: if len(Gui.text_input) <= 1: Gui.text_input = \"\" else: Gui.text_input = Gui.text_input[:-1]", ":param self: inject reference to self. For AlgoButton, this parameter is always injected", "Dict[str, Union[pg.Rect, GridButton, AlgoButton, Checkbox, DropDownButton, TextInputButton, StateButton, SystemButton, Background]] = dict() #", "func=set_algo), AlgoButton((0, 0), \"Dijkstra\", \"dijkstra\", active_color=cst.BLACK, rounded=False, func=set_algo)] algo_buttons[0].is_activated = True algo_gui =", "\"Reset Search\", reset, True) main_gui[\"play_pause_button\"] = StateButton((15, main_gui[\"reset_search_button\"].rect.bottom + 10), \"Play/Pause\", play_pause) main_gui[\"grid_n_wide_button\"]", "except KeyError: pass class Gui: text_input = \"\" def __init__(self, dict_object: Dict[str, Any],", "+ 35), get_neighbor_dt), rsr_prep_time=Stat(\"RSR Preprocess (ms): \", cst.BLACK, (cfg.stats_background_rect.x + 15, cfg.stats_background_rect.y +", "object loaded from pickle file :return: None \"\"\" grid_obj.all_nodes = save_object[\"grid\"] grid_obj.start =", "For Checkboxes this parameter is always injected in is_clicked :return: None \"\"\" pathfinder_obj.apply_rsr", "None \"\"\" def disable_buttons() -> None: \"\"\" Disable Buttons that cannot be used", "redistributing to handle click or handle input methods. TYPING: *additional: (event.type, function(event, *args)", "This module handles the user interface (GUI) for the pathfinding visualizer. It handles", "main_gui[\"reset_search_button\"].rect.bottom + 10), \"Play/Pause\", play_pause) main_gui[\"grid_n_wide_button\"] = TextInputButton({\"min\": 3, \"max\": cfg.window.get_width() - 205", "main_gui[\"start_node_button\"] = GridButton((15, 25), \"Place Start\") main_gui[\"end_node_button\"] = GridButton((main_gui[\"start_node_button\"].rect.right + 5, main_gui[\"start_node_button\"].rect.top), \"Place", "and injects the n_wide and n_high dependencies from the main_gui's grid_n_wide and grid_n_high", "ending node. 
If no end or start node is defined, adds a popup", "False cst.dirty_fills.append(temp.get_fill()) pathfinder_obj.search_is_init = False pathfinder_obj.dijkstra_cost_so_far = 0 pathfinder_obj.running = False pathfinder_obj.path_found =", "window \"\"\" def ok_func(root: Gui, child: Gui) -> None: \"\"\" Removes the popup_gui", "to remove from Parent :param attributes: Used to specify \"grid\" if it needs", "get_algo_dt() -> float: \"\"\" Get algorithm process time from the pathfinder or the", "/ 4), \"OK\", func=ok_func) background = Background(cst.DARK_GREY, pg.Rect(((cfg.window.get_width() - bg_width) / 2, (cfg.window.get_height()", ":return: None \"\"\" pg.event.post(pg.event.Event(pg.QUIT)) # creating GUI ##################################################################################################### main_gui: Dict[str, Union[pg.Rect, GridButton, AlgoButton,", "alot of setter functions could be shifted into a get_data(gui) method by the", "Button functions for particular cases: def random_walls(self: GridButton) -> None: \"\"\" Function for", "self.handle_input(event) for user_event, func, *args in additional: if event.type == user_event: func(event, *args)", "from the root/parent Gui. Disable the grid from receiving input and redraw it", "and display the buttons the show change. :param arg: display_moves_button.is_activated, For Checkboxes this", "it will be incremented during the loop position_y = start_height - nodes_height position_x", "called once it has been initialised :return: None \"\"\" if self.timer(): self.display() def", "ok_func(root: Gui, child: Gui) -> None: \"\"\" Removes the popup_gui from the root/parent", "- ext_close=True Removes Gui from parent Gui on external clicks :param dict_object: all", "a time because all Button's position depend on the previous ones Last we", "input buttons, displaying stats, popups and setting start/end nodes It also creates all", "None \"\"\" grid_obj.generate(main_gui_handler.grid_n_wide_button.dict[\"value\"], main_gui_handler.grid_n_high_button.dict[\"value\"]) def play_pause(arg: bool = None) -> None: \"\"\" Switches", "grid to screen, as well as possible, might make grid go out of", "Also I wanted to make a flexible GUI object to be able to", "of stats \"\"\" self.__dict__.update(kwargs) self.stats = [obj for obj in self.__dict__.values() if obj.__class__", "= Gui({f\"{button.algo}\": button for button in algo_buttons}, external=True, ext_close=True) main_gui[\"dropdown_algo\"] = DropDownButton((15, main_gui[\"random_walls_button\"].rect.bottom", "= Button((0, 0), \"OK\") ok_button = OkButton(((cfg.window.get_width() - dimension_butt.rect.w) / 2, (cfg.window.get_height() -", "pathfinder starts looping handle_display() pg.display.flip() pathfinder_obj.init_search() def reset(partial: bool = False) -> None:", "= {\"start\": grid_obj.start, \"end\": grid_obj.end, \"grid\": grid_obj.all_nodes} with open(direct, \"wb\") as file: dump(save_object,", "3, \"max\": cfg.window.get_width() - 205 - 25, \"default\": 100, \"value\": 100}, (15, main_gui[\"play_pause_button\"].rect.bottom", "used = True if not used and priority_obj.ext_close: priority_obj.src_butt.is_activated = False remove_from_root(root=self, child=priority_obj)", "func=set_algo), AlgoButton((0, 0), \"A*\", \"astar\", active_color=cst.BLACK, rounded=False, func=set_algo), AlgoButton((0, 0), \"Dijkstra\", \"dijkstra\", active_color=cst.BLACK,", "save_object[\"start\"] grid_obj.end = save_object[\"end\"] main_gui_handler.grid_n_wide_button.dict[\"value\"] = 
len(grid_obj.all_nodes) main_gui_handler.grid_n_high_button.dict[\"value\"] = len(grid_obj.all_nodes[0]) main_gui_handler.grid_n_wide_button.display() main_gui_handler.grid_n_high_button.display() tkinter.Tk().withdraw()", "\"\"\" # Button functions for particular cases: def random_walls(self: GridButton) -> None: \"\"\"", "if not button.is_disabled: if click.colliderect(button.rect): click_used = True button.is_clicked(gui=self, root=root) elif button.is_activated and", ":param arg: display_moves_button.is_activated, For Checkboxes this parameter is always injected in is_clicked :return:", "Function for the diago_allowed Checkbox. Switches the bool of pathfinder.diago attribute. :param arg:", "GridButton) -> None: \"\"\" Function for the random walls button, 10% of the", "\"\"\" pathfinder_obj.diago = arg def apply_rsr_func(arg: bool) -> None: \"\"\" Function for the", "for i, j in group: if i.__class__ is pg.Surface: cfg.window.blit(i, j) else: cfg.window.fill(i,", "False remove_from_root(root=self, child=priority_obj) except AttributeError: pass return used click = pg.Rect(mouse_pos, (1, 1))", "\"ok_butt\": ok_button}) popup.draw_all() return popup class StatsHandler: def __init__(self, background: Background, increment: int", "\"\"\" self.objects = [] self.events = [] for name, obj in dict_object.items(): setattr(self,", "pop_up(announcement: str) -> Gui: \"\"\" Creates a Pop-up window Gui with a single", "if click was used, else False \"\"\" used = False if isinstance(priority_obj, Gui):", "\"\"\" for obj in self.objects: obj.display() for key in attributes: self.__dict__[key].display() def handle_events(self,", "is not typed to avoid import) :param grid_obj: Grid object to link to", "the diago_allowed Checkbox. Switches the bool of pathfinder.diago attribute. :param arg: diago_button.is_activated, For", "the gui :return: None \"\"\" for obj in self.objects: obj.display() for key in", "cst.dirty_fills.append(temp.get_fill()) if grid_obj.end is not None: temp = grid_obj.end grid_obj.end = None temp.is_end", "&= ~(Node.SYM_RECT | Node.BORDER | Node.VISITED | Node.PATH) cst.dirty_fills.append(node.get_fill()) for obj in main_gui_handler.objects:", "use it elsewhere (pop-ups) main_gui[\"button_background_rect\"] = Background(cst.LIGHT_GREY, cfg.button_background_rect) # grid placement buttons main_gui[\"start_node_button\"]", "Checkbox(\"Diagonal moves\", (15, main_gui[\"dropdown_algo\"].rect.bottom + 10), False, diago_func) main_gui[\"apply_rsr_button\"] = Checkbox(\"Apply RSR\", (15,", "\"rb\") as file: save_object_ = load(file) update_values(save_object_) scale_and_draw() def exit_func() -> None: \"\"\"", "self.objects: self.__dict__[f\"{obj.__class__}_group\"].append(obj) self.__dict__.update(kwargs) def draw_all(self, *attributes: str) -> None: \"\"\" Call display() method", "for button in self.objects: try: if not button.is_disabled: if click.colliderect(button.rect): click_used = True", "the pathfinding visualizer. 
It handles the function for clicking on buttons, using input", "main_gui[\"play_pause_button\"].rect.bottom + 30), 50, \"Nodes in width: \", func=generate) main_gui[\"grid_n_high_button\"] = TextInputButton({\"min\": 3,", "the StatsHandler :param pathfinder: Pathfinder object of the program (Singleton) (class not typed", "Gui.text_input = \"\" else: Gui.text_input += event.unicode button.dict[\"value\"] = Gui.text_input button.display() def handle_clicks(self,", "\"Load Grid\", load_grid) main_gui[\"exit_button\"] = SystemButton((15, main_gui[\"load_grid_button\"].rect.bottom + 30), \"Exit\", exit_func) main_gui_handler =", "parent Gui, allows the child to remove itself from the parent's objects once", "False. Disable Buttons that cannot be used during pathfinding :param arg: Not needed,", "with open(direct, \"wb\") as file: dump(save_object, file) def load_grid() -> None: \"\"\" Load", "(event.type, function(event, *args) -> None , *args), ... :param additional: Allows entering specific", "TODO: try resetting the focus to pygame def save() -> None: \"\"\" Save", "path found by the pathfinder\"\"\" return len(pathfinder.shortest_path) def get_fps() -> float: \"\"\" Get", "the n_wide and n_high dependencies from the main_gui's grid_n_wide and grid_n_high TextInputButtons' values", "algo buttons algo_buttons = [AlgoButton((0, 0), \"Flood Fill\", \"bfs\", active_color=cst.BLACK, rounded=False, func=set_algo), AlgoButton((0,", "the parent's objects when it is terminated used = priority_obj.handle_clicks(mouse_pos, root=self) try: if", "import) :param grid_obj: Grid object to link to the Gui :return: Gui object", "-> float: \"\"\" Get the fps of the program\"\"\" return round(cfg.clock.get_fps(), 1) stat_handler", "40, \"Wait: \") main_gui[\"reset_button\"] = StateButton((15, main_gui[\"run_interval_button\"].rect.bottom + 30), \"Reset Grid\", reset) main_gui[\"reset_search_button\"]", "*additional: (event.type, function(event, *args) -> None , *args), ... :param additional: Allows entering", "15), get_algo_dt), neighbor_prep_time=Stat(\"Neighbors Preprocess (ms): \", cst.BLACK, (cfg.stats_background_rect.x + 15, cfg.stats_background_rect.y + 35),", "len(pathfinder.shortest_path) def get_fps() -> float: \"\"\" Get the fps of the program\"\"\" return", "the values of the grid_n_wide and grid_n_high buttons and display changes :param save_object:", "for key in attributes: self.__dict__[key].display() def handle_events(self, *additional: Any) -> None: \"\"\" Handle", "temp = grid_obj.start grid_obj.start = None temp.is_start = False cst.dirty_fills.append(temp.get_fill()) if grid_obj.end is", "import pygame as pg import config as cfg import constants as cst from", "nodes_height position_x = cfg.button_background_rect.width - nodes_width for column in grid_obj.all_nodes: position_x += nodes_width", "clicking on buttons, using input buttons, displaying stats, popups and setting start/end nodes", "rounded=False, func=set_algo), AlgoButton((0, 0), \"A*\", \"astar\", active_color=cst.BLACK, rounded=False, func=set_algo), AlgoButton((0, 0), \"Dijkstra\", \"dijkstra\",", "dependency as root, to allow the child Gui to remove itself # from", "node.height = nodes_height node.width = nodes_width node.position = (position_x, position_y) node.rect = pg.rect.Rect(node.position,", "\\ not main_gui_handler.display_moves_button.is_activated # TODO: try resetting the focus to pygame def save()", "all buttons that will be added to the Gui. 
Second, create a dict", "also creates all buttons by the init_gui function \"\"\" from pickle import load,", "was used, else False \"\"\" def check_priority() -> Union[DropDownButton, Gui]: \"\"\" Check if", "of pathfinder.display_steps attribute. Disables the run interval and wait time buttons of the", "bg_height) / 2), (bg_width, bg_height)), text_obj) popup = Gui({\"popup_bg\": background, \"ok_butt\": ok_button}) popup.draw_all()", "to self. For AlgoButton, this parameter is always injected in is_clicked :return: None", "grid has a starting node and an ending node. If no end or", "\"\"\" Handles timing of the stats handler :return: True if it's time to", "\"\"\" This module handles the user interface (GUI) for the pathfinding visualizer. It", "False \"\"\" used = False if isinstance(priority_obj, Gui): # Inject parent Gui dependency", "Gui object \"\"\" # Button functions for particular cases: def random_walls(self: GridButton) ->", "time buttons of the main_gui if display_steps if False, and display the buttons", "= Gui({\"popup_bg\": background, \"ok_butt\": ok_button}) popup.draw_all() return popup class StatsHandler: def __init__(self, background:", "function(event, *args) -> None , *args), ... :param additional: Allows entering specific (event.type,", "Gui.text_input = Gui.text_input[:-1] button.dict[\"value\"] = Gui.text_input elif event.key == pg.K_RETURN: button.confirm_input(Gui.text_input) Gui.text_input =", "main_gui_handler.grid_n_wide_button.dict[\"value\"] = len(grid_obj.all_nodes) main_gui_handler.grid_n_high_button.dict[\"value\"] = len(grid_obj.all_nodes[0]) main_gui_handler.grid_n_wide_button.display() main_gui_handler.grid_n_high_button.display() tkinter.Tk().withdraw() direct = filedialog.askopenfilename(initialdir=grid_path) if", "pathfinder_obj.display or main_gui_handler.run_interval_button.dict[\"value\"] == -1: # update display to show disabled buttons before", "+ 10), 40, \"Nodes in height: \", func=generate) main_gui[\"brush_size_button\"] = TextInputButton({\"min\": 1, \"max\":", "node is defined, adds a popup Gui to the main_gui :return: True if", "DropDownButton, TextInputButton, StateButton, SystemButton, Background]] = dict() # It's a bit ugly doing", "be added to the Gui as \"attribute\": object. The dict is defined one", "for column in grid_obj.all_nodes: for node in column: node.neighbors = None node.came_from =", "child-most Gui is forbidden if not click_used: for button in self.objects: try: if", "background: Background, increment: int = 200, **kwargs: Stat) -> None: \"\"\" Creates a", "None \"\"\" pathfinder_obj.apply_rsr = arg def set_algo(self: AlgoButton) -> None: \"\"\" Set the", "added to its objects, it will pass down its click events to it", "buttons of the main_gui if display_steps if False, and display the buttons the", "dimension_butt.rect.h) / 2 + 100 / 4), \"OK\", func=ok_func) background = Background(cst.DARK_GREY, pg.Rect(((cfg.window.get_width()", "grid_obj.generate(main_gui_handler.grid_n_wide_button.dict[\"value\"], main_gui_handler.grid_n_high_button.dict[\"value\"]) def play_pause(arg: bool = None) -> None: \"\"\" Switches the pathfinder.running", "position_y = start_height - nodes_height position_x = cfg.button_background_rect.width - nodes_width for column in", "# Also I wanted to make a flexible GUI object to be able", "Gui as \"attribute\": object. 
The dict is defined one line at a time", "pathfinder_obj.path_found = False pathfinder_obj.frontier = [] pathfinder_obj.queue.clear() pathfinder_obj.to_be_removed = [] pathfinder_obj.shortest_path = []", "in a list of stats \"\"\" self.__dict__.update(kwargs) self.stats = [obj for obj in", "the program's fill and blits orders are appended to one of the lists", "clicks affect it, clicking outside of a DropDownButton's rect is allowed and clicks", "and wait time buttons of the main_gui if display_steps if False, and display", "parameter is always injected in is_clicked :return: None \"\"\" pathfinder_obj.apply_rsr = arg def", "None: \"\"\" Save the Grid object as a Pickle file in the Grids", "for this function to run, once the Enter key is pressed, it's confirm_input(self.text_input)", "arguments. First define the necessary functions for all buttons that will be added", "= DropDownButton((15, main_gui[\"random_walls_button\"].rect.bottom + 30), \"Algo: \", algo_buttons, child_gui=algo_gui) main_gui[\"diago_button\"] = Checkbox(\"Diagonal moves\",", "the first because it will be incremented during the loop position_y = start_height", "2, (cfg.window.get_height() - dimension_butt.rect.h) / 2 + 100 / 4), \"OK\", func=ok_func) background", "window or screen, if a Gui object is added to its objects, it", "handle clicks on Buttons (see classes.py for prefabricated classes to use) :param kwargs:", "None: \"\"\" Disable Buttons that cannot be used during pathfinding :return: None \"\"\"", "grid from receiving input and redraw it to cover the popup necessary kwargs:", "for event in self.events: if event.type == pg.MOUSEBUTTONDOWN: self.handle_clicks(pg.mouse.get_pos()) elif event.type == pg.KEYDOWN:", "resetting search, False if resetting grid :return: None\"\"\" pathfinder_obj.running = False if not", "ok_button}) popup.draw_all() return popup class StatsHandler: def __init__(self, background: Background, increment: int =", "\", cst.BLACK, (cfg.stats_background_rect.x + 15, cfg.stats_background_rect.y + 15), get_algo_dt), neighbor_prep_time=Stat(\"Neighbors Preprocess (ms): \",", "click_used # TODO: alot of setter functions could be shifted into a get_data(gui)", "moves\", (15, main_gui[\"dropdown_algo\"].rect.bottom + 10), False, diago_func) main_gui[\"apply_rsr_button\"] = Checkbox(\"Apply RSR\", (15, main_gui[\"diago_button\"].rect.bottom", "processing\"\"\" return round(pathfinder.algo_dt, 2) def get_neighbor_dt() -> float: \"\"\" Get the time taken", ":param grid_obj: Grid object to link to the Gui :return: Gui object \"\"\"", "= True algo_gui = Gui({f\"{button.algo}\": button for button in algo_buttons}, external=True, ext_close=True) main_gui[\"dropdown_algo\"]", "\"\"\" pathfinder_obj.algo = self.algo main_gui_handler.dropdown_algo.is_activated = False remove_from_root(root=main_gui_handler, child=algo_gui) def generate() -> None:", "Gui): priority_to = obj return priority_to def handle_priority(priority_obj: Union[DropDownButton, Gui]) -> bool: \"\"\"", "Buttons that cannot be used during pathfinding :param arg: Not needed, but is", "main_gui[\"apply_rsr_button\"].rect.bottom + 10), True, disp_moves_func) main_gui[\"run_interval_button\"] = TextInputButton({\"min\": -1, \"max\": 9999, \"default\": 0,", "or main_gui_handler.run_interval_button.dict[\"value\"] == -1: # update display to show disabled buttons before pathfinder", "walls :param self: random_walls button object. 
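
# Illustrative sketch (not part of the original module): the dict_object contract of Gui
# and the (event.type, handler, *args) protocol of handle_events. _Label and the
# SimpleNamespace event are stand-ins; cst.NO_END is the module's own custom event id.
def _demo_gui_basics() -> None:
    from types import SimpleNamespace

    class _Label:
        def display(self) -> None:
            pass

    gui = Gui({"title": _Label()}, pathfinder=None)  # kwargs become injected attributes
    gui.draw_all()  # calls display() on every object
    assert gui.title in gui.objects and gui.pathfinder is None

    hits = []
    gui.events.append(SimpleNamespace(type=cst.NO_END, announcement="No end Node!"))
    gui.handle_events((cst.NO_END, lambda e, log: log.append(e.announcement), hits))
    assert hits == ["No end Node!"]  # the extra handler received (event, *args)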
# TODO: a lot of setter functions could be shifted into a get_data(gui) method by the
#  pathfinder to unclutter this module and remove some LOC...
def init_gui(pathfinder_obj: Any, grid_obj: Any) -> Gui:
    """
    Initialise the main Gui for the visualizer module; most dependency issues are fixed by
    injecting the necessary objects as arguments.
    First, define the necessary functions for all buttons that will be added to the Gui.
    Second, create a dict of all the objects to be added to the Gui as "attribute": object.
    The dict is defined one line at a time because each Button's position depends on the
    previous ones.
    Last, we create the Gui from the dict, with pathfinder and grid added as kwargs.
    :param pathfinder_obj: Pathfinder object to link to the Gui (class is not typed to
    avoid import)
    :param grid_obj: Grid object to link to the Gui
    :return: Gui object
    """

    # Button functions for particular cases:
    def random_walls(self: GridButton) -> None:
        """
        Function for the random walls button; 10% of the nodes in the grid will become
        walls.
        :param self: random_walls button object. For GridButton, this parameter is always
        injected in is_clicked
        :return: None
        """
        self.is_activated = False
        for column in grid_obj.all_nodes:
            for node in column:
                if randrange(11) == 0:
                    if node is not grid_obj.start and node is not grid_obj.end:
                        node.status |= Node.WALL
                        cst.dirty_fills.append(node.get_fill())

    def disp_moves_func(arg: bool) -> None:
        """
        Function for the display moves Checkbox. Switches the bool of the
        pathfinder.display_steps attribute. Disables the run interval and wait time buttons
        of the main_gui if display_steps is False, and displays the buttons to show the
        change.
        :param arg: display_moves_button.is_activated. For Checkboxes, this parameter is
        always injected in is_clicked
        :return: None
        """
        pathfinder_obj.display = arg
        # if "wait_time_button" in gui.__dict__.keys() and "run_interval_button" in gui.__dict__.keys():
        main_gui_handler.run_interval_button.is_disabled = \
            main_gui_handler.wait_time_button.is_disabled = \
            not main_gui_handler.display_moves_button.is_activated
        main_gui_handler.run_interval_button.display()
        main_gui_handler.wait_time_button.display()

    def diago_func(arg: bool) -> None:
        """
        Function for the diago_allowed Checkbox. Switches the bool of the pathfinder.diago
        attribute.
        :param arg: diago_button.is_activated. For Checkboxes, this parameter is always
        injected in is_clicked
        :return: None
        """
        pathfinder_obj.diago = arg

    def apply_rsr_func(arg: bool) -> None:
        """
        Function for the apply_rsr Checkbox. Switches the bool of the pathfinder.apply_rsr
        attribute.
        :param arg: apply_rsr_button.is_activated. For Checkboxes, this parameter is always
        injected in is_clicked
        :return: None
        """
        pathfinder_obj.apply_rsr = arg

    def set_algo(self: AlgoButton) -> None:
        """
        Set the pathfinder.algo attribute to the algorithm associated with the AlgoButton.
        :param self: inject reference to self. For AlgoButton, this parameter is always
        injected in is_clicked
        :return: None
        """
        pathfinder_obj.algo = self.algo
        main_gui_handler.dropdown_algo.is_activated = False
        remove_from_root(root=main_gui_handler, child=algo_gui)

    def generate() -> None:
        """
        Calls the generate method of the grid object, and injects the n_wide and n_high
        dependencies from the main_gui's grid_n_wide and grid_n_high TextInputButtons'
        values.
        :return: None
        """
        grid_obj.generate(main_gui_handler.grid_n_wide_button.dict["value"],
                          main_gui_handler.grid_n_high_button.dict["value"])

    def play_pause(arg: bool = None) -> None:
        """
        Switches the pathfinder.running attribute on and off on every press if the run
        conditions are met, and disables Buttons that cannot be used during pathfinding.
        :param arg: Not needed, but is included for the functions of other StateButtons
        :return: None
        """
        def disable_buttons() -> None:
            """
            Disable Buttons that cannot be used during pathfinding
            :return: None
            """
            for obj in main_gui_handler.objects:
                if obj.__class__ is not StateButton and obj is not main_gui_handler.exit_button:
                    try:
                        obj.is_disabled = True
                    except AttributeError:
                        continue
                    obj.display()

        def check_conditions() -> bool:
            """
            Check that an algorithm is defined, and that the grid has a starting node and
            an ending node. If no end or start node is defined, adds a popup Gui to the
            main_gui.
            :return: True if pathfinder is ready to run
            """
            if pathfinder_obj.algo:
                if grid_obj.start:
                    if grid_obj.end:
                        return True
                    else:
                        pg.event.post(pg.event.Event(cst.NO_END, announcement="No end Node!"))
                else:
                    pg.event.post(pg.event.Event(cst.NO_START, announcement="No start Node!"))
            return False

        if check_conditions():
            pathfinder_obj.running = not pathfinder_obj.running
            # could add ...
            if not pathfinder_obj.search_is_init:
                disable_buttons()
                if not pathfinder_obj.display or main_gui_handler.run_interval_button.dict["value"] == -1:
                    # update display to show disabled buttons before pathfinder starts looping
                    handle_display()
                    pg.display.flip()
                pathfinder_obj.init_search()

    def reset(partial: bool = False) -> None:
        """
        Stops the pathfinder and resets all necessary attributes; a partial reset keeps the
        walls and the start and end nodes as is.
        :param partial: True if resetting search, False if resetting grid
        :return: None
        """
        pathfinder_obj.running = False
        if not partial:
            if grid_obj.start is not None:
                temp = grid_obj.start
                grid_obj.start = None
                temp.is_start = False
                cst.dirty_fills.append(temp.get_fill())
            if grid_obj.end is not None:
                temp = grid_obj.end
                grid_obj.end = None
                temp.is_end = False
                cst.dirty_fills.append(temp.get_fill())
        pathfinder_obj.search_is_init = False
        pathfinder_obj.dijkstra_cost_so_far = 0
        pathfinder_obj.running = False
        pathfinder_obj.path_found = False
        pathfinder_obj.frontier = []
        pathfinder_obj.queue.clear()
        pathfinder_obj.to_be_removed = []
        pathfinder_obj.shortest_path = []
        pathfinder_obj.neighbors_prep_dt = 0
        pathfinder_obj.rsr_prep_dt = 0
        pathfinder_obj.algo_dt = 0
        for column in grid_obj.all_nodes:
            for node in column:
                node.neighbors = None
                node.came_from = None
                if node.update_color() is not cst.BLACK:
                    if not partial:
                        node.status &= ~(Node.WALL | Node.END | Node.START)
                    node.status &= ~(Node.SYM_RECT | Node.BORDER | Node.VISITED | Node.PATH)
                    cst.dirty_fills.append(node.get_fill())
        for obj in main_gui_handler.objects:
            try:
                obj.is_disabled = False
                obj.display()
            except AttributeError:  # (Backgrounds)
                continue
        main_gui_handler.run_interval_button.is_disabled = \
            main_gui_handler.wait_time_button.is_disabled = \
            not main_gui_handler.display_moves_button.is_activated
        # TODO: try resetting the focus to pygame

    def save() -> None:
        """
        Save the Grid object as a Pickle file in the Grids folder (or other)
        :return: None
        """
        tkinter.Tk().withdraw()
        direct = filedialog.asksaveasfilename(initialdir=grid_path, defaultextension=".pickle")
        if direct:
            save_object = {"start": grid_obj.start, "end": grid_obj.end, "grid": grid_obj.all_nodes}
            with open(direct, "wb") as file:
                dump(save_object, file)

    def load_grid() -> None:
        """
        Load a grid object from the Grids folder (or other), update values, scale the grid
        and show all changes
        :return: None
        """
        def scale_and_draw() -> None:
            """
            Scale the grid object to fit the current screen size, and draw the grid.
            :return: None
            """
            # scale grid to screen, as well as possible, might make grid go out of borders
            nodes_width = grid_obj.width / main_gui_handler.grid_n_wide_button.dict["value"]
            nodes_height = grid_obj.height / main_gui_handler.grid_n_high_button.dict["value"]
            start_height = 25
            position_x = cfg.button_background_rect.width - nodes_width
            for column in grid_obj.all_nodes:
                position_x += nodes_width
                # Substracting the first because it will be incremented during the loop
                position_y = start_height - nodes_height
                for node in column:
                    position_y += nodes_height
                    node.height = nodes_height
                    node.width = nodes_width
                    node.position = (position_x, position_y)
                    node.rect = pg.rect.Rect(node.position, (node.width, node.height))
                    cst.dirty_fills.append(node.get_fill())

        def update_values(save_object: dict) -> None:
            """
            Updates the attributes of the grid object and the values of the grid_n_wide and
            grid_n_high buttons, and displays the changes.
            :param save_object: save object loaded from pickle file
            :return: None
            """
            grid_obj.all_nodes = save_object["grid"]
            grid_obj.start = save_object["start"]
            grid_obj.end = save_object["end"]
            main_gui_handler.grid_n_wide_button.dict["value"] = len(grid_obj.all_nodes)
            main_gui_handler.grid_n_high_button.dict["value"] = len(grid_obj.all_nodes[0])
            main_gui_handler.grid_n_wide_button.display()
            main_gui_handler.grid_n_high_button.display()

        tkinter.Tk().withdraw()
        direct = filedialog.askopenfilename(initialdir=grid_path)
        if direct:
            with open(direct, "rb") as file:
                save_object_ = load(file)
            update_values(save_object_)
            scale_and_draw()

    def exit_func() -> None:
        """
        Exit program.
        :return: None
        """
        pg.event.post(pg.event.Event(pg.QUIT))

    # creating GUI #####################################################################################################
    main_gui: Dict[str, Union[pg.Rect, GridButton, AlgoButton, Checkbox, DropDownButton,
                              TextInputButton, StateButton, SystemButton, Background]] = dict()
    # It's a bit ugly doing it like this but it's the only way I know to keep reference to
    # the previous entry.
    # Also I wanted to make a flexible GUI object to be able to use it elsewhere (pop-ups)
    main_gui["button_background_rect"] = Background(cst.LIGHT_GREY, cfg.button_background_rect)
    # grid placement buttons
    main_gui["start_node_button"] = GridButton((15, 25), "Place Start")
    main_gui["end_node_button"] = GridButton((main_gui["start_node_button"].rect.right + 5,
                                              main_gui["start_node_button"].rect.top), "Place End")
    main_gui["draw_walls_button"] = GridButton((15, main_gui["start_node_button"].rect.bottom + 10),
                                               "Draw walls")
    main_gui["erase_walls_button"] = GridButton((main_gui["draw_walls_button"].rect.right + 5,
                                                 main_gui["draw_walls_button"].rect.top), "Erase walls")
    main_gui["random_walls_button"] = GridButton((15, main_gui["draw_walls_button"].rect.bottom + 10),
                                                 "Random walls", func=random_walls)
    # algo buttons
    algo_buttons = [AlgoButton((0, 0), "Flood Fill", "bfs", active_color=cst.BLACK, rounded=False,
                               func=set_algo),
                    AlgoButton((0, 0), "A*", "astar", active_color=cst.BLACK, rounded=False,
                               func=set_algo),
                    AlgoButton((0, 0), "Dijkstra", "dijkstra", active_color=cst.BLACK, rounded=False,
                               func=set_algo)]
    algo_buttons[0].is_activated = True
    algo_gui = Gui({f"{button.algo}": button for button in algo_buttons}, external=True, ext_close=True)
    main_gui["dropdown_algo"] = DropDownButton((15, main_gui["random_walls_button"].rect.bottom + 30),
                                               "Algo: ", algo_buttons, child_gui=algo_gui)
    main_gui["diago_button"] = Checkbox("Diagonal moves", (15, main_gui["dropdown_algo"].rect.bottom + 10),
                                        False, diago_func)
    main_gui["apply_rsr_button"] = Checkbox("Apply RSR", (15, main_gui["diago_button"].rect.bottom + 10),
                                            False, apply_rsr_func)
    main_gui["display_moves_button"] = Checkbox("Display moves",
                                                (15, main_gui["apply_rsr_button"].rect.bottom + 10),
                                                True, disp_moves_func)
    main_gui["run_interval_button"] = TextInputButton({"min": -1, "max": 9999, "default": 0, "value": 0},
                                                      (15, main_gui["display_moves_button"].rect.bottom + 10),
                                                      40, "Run: ")
    main_gui["wait_time_button"] = TextInputButton({"min": 0, "max": 9999, "default": 0, "value": 0},
                                                   (main_gui["run_interval_button"].rect.right + 5,
                                                    main_gui["display_moves_button"].rect.bottom + 10),
                                                   40, "Wait: ")
    main_gui["reset_button"] = StateButton((15, main_gui["run_interval_button"].rect.bottom + 30),
                                           "Reset Grid", reset)
    main_gui["reset_search_button"] = StateButton((main_gui["reset_button"].rect.right + 5,
                                                   main_gui["reset_button"].rect.top),
                                                  "Reset Search", reset, True)
    main_gui["play_pause_button"] = StateButton((15, main_gui["reset_search_button"].rect.bottom + 10),
                                                "Play/Pause", play_pause)
    main_gui["grid_n_wide_button"] = TextInputButton({"min": 3, "max": cfg.window.get_width() - 205 - 25,
                                                      "default": 100, "value": 100},
                                                     (15, main_gui["play_pause_button"].rect.bottom + 30),
                                                     50, "Nodes in width: ", func=generate)
    main_gui["grid_n_high_button"] = TextInputButton({"min": 3, "max": cfg.window.get_height() - 125 - 25,
                                                      "default": 100, "value": 100},
                                                     (15, main_gui["grid_n_wide_button"].rect.bottom + 10),
                                                     40, "Nodes in height: ", func=generate)
    main_gui["brush_size_button"] = TextInputButton({"min": 1, "max": 200, "default": 1, "value": 1},
                                                    (15, main_gui["grid_n_high_button"].rect.bottom + 10),
                                                    30, "Brush size: ")
    main_gui["save_grid_button"] = SystemButton((15, main_gui["brush_size_button"].rect.bottom + 30),
                                                "Save Grid", save)
    main_gui["load_grid_button"] = SystemButton((main_gui["save_grid_button"].rect.right + 5,
                                                 main_gui["save_grid_button"].rect.top),
                                                "Load Grid", load_grid)
    main_gui["exit_button"] = SystemButton((15, main_gui["load_grid_button"].rect.bottom + 30),
                                           "Exit", exit_func)
    main_gui_handler = Gui(main_gui, pathfinder=pathfinder_obj, grid=grid_obj)
    return main_gui_handler
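
# Illustrative sketch (not part of the original module): why the main_gui dict is built
# "one line at a time" -- each widget's position chains off the previous widget's rect, so
# inserting a button reflows everything below it. The sizes here are arbitrary.
def _demo_layout_chaining() -> None:
    rects = dict()
    rects["start"] = pg.Rect((15, 25), (90, 22))
    rects["walls"] = pg.Rect((15, rects["start"].bottom + 10), (90, 22))
    rects["reset"] = pg.Rect((15, rects["walls"].bottom + 30), (90, 22))
    assert rects["reset"].topleft == (15, 109)  # 25 + 22 + 10 + 22 + 30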
def handle_display() -> None:
    """
    Does all the fills and blits to the window, then clears the channels; fills are made
    before blits. All the program's fill and blit orders are appended to one of the lists
    in cst.to_display (see the constants.py module); for special cases there is an early
    and a late channel.
    :return: None
    """
    for group in cst.to_display:
        for i, j in group:
            if i.__class__ is pg.Surface:
                cfg.window.blit(i, j)
            else:
                cfg.window.fill(i, j)
        group.clear()
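
# Illustrative sketch (not part of the original module): the order format consumed by
# handle_display -- (Surface, dest) pairs are blitted, anything else is treated as a
# (color, rect) fill. That cst.dirty_fills belongs to the cst.to_display channels is
# assumed from its use throughout this module.
def _demo_display_channels() -> None:
    cst.dirty_fills.append((cst.BLACK, pg.Rect(0, 0, 10, 10)))  # a fill order
    # handle_display() would now fill cfg.window with it and clear the channel;
    # pop it here so this sketch leaves the global channels untouched.
    cst.dirty_fills.pop()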
def pop_up(announcement: str) -> Gui:
    """
    Creates a Pop-up window Gui with a single OK button to dismiss the message and remove
    the Gui from its parent Gui.
    Use as follows: from the main Gui, on event: main_Gui.objects.append(pop_up("hello"))
    :param announcement: Text to be displayed on the popup window
    :return: A Gui object representing the popup window
    """
    def ok_func(root: Gui, child: Gui) -> None:
        """
        Removes the popup_gui from the root/parent Gui. Disables the grid from receiving
        input and redraws it to cover the popup.
        :param root: Parent/root Gui
        :param child: Child Gui to remove from parent
        :return: None
        """
        root.grid.disabled = pg.time.get_ticks() + 100
        remove_from_root("grid", root=root, child=child)

    text_surf = cst.big_text_font.render(announcement, True, cst.RED)
    bg_width = 2 * text_surf.get_width()
    bg_height = 4 * text_surf.get_height()
    text_obj = (text_surf, ((bg_width - text_surf.get_width()) / 2,
                            (bg_height - text_surf.get_height()) / 3))
    dimension_butt = Button((0, 0), "OK")
    ok_button = OkButton(((cfg.window.get_width() - dimension_butt.rect.w) / 2,
                          (cfg.window.get_height() - dimension_butt.rect.h) / 2 + 100 / 4),
                         "OK", func=ok_func)
    background = Background(cst.DARK_GREY, pg.Rect(((cfg.window.get_width() - bg_width) / 2,
                                                    (cfg.window.get_height() - bg_height) / 2),
                                                   (bg_width, bg_height)), text_obj)
    popup = Gui({"popup_bg": background, "ok_butt": ok_button})
    popup.draw_all()
    return popup
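
# Illustrative sketch (not part of the original module): wiring the custom "no node" events
# to pop_up through handle_events' additional-handler protocol, per the docstring above.
# The host loop and the way main_gui_handler is passed in are assumptions about the caller.
def _demo_popup_wiring(main_gui_handler: Gui) -> None:
    def show_popup(event, gui: Gui) -> None:
        gui.objects.append(pop_up(event.announcement))

    main_gui_handler.handle_events((cst.NO_END, show_popup, main_gui_handler),
                                   (cst.NO_START, show_popup, main_gui_handler))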
class StatsHandler:
    def __init__(self, background: Background, increment: int = 200, **kwargs: Stat) -> None:
        """
        Creates a Singleton Stats handler for displaying Stat objects on a Background (the
        Background is important so the anti-aliased text does not become opaque).
        :param background: Background object where the stats will be displayed
        :param increment: time between updates of the stats in ms
        :param kwargs: add stat objects as attributes of the stat handler
        ("attribute" = object) and in a list of stats
        """
        self.__dict__.update(kwargs)
        self.stats = [obj for obj in self.__dict__.values() if obj.__class__ is Stat]
        self.background = background
        self.chrono = 0
        self.increment = increment

    def display(self) -> None:
        """
        Display all Stat objects in self.stats.
        :return: None
        """
        self.background.display()
        for stat in self.stats:
            stat.display()

    def timer(self) -> bool:
        """
        Handles timing of the stats handler.
        :return: True if it's time to display
        """
        if pg.time.get_ticks() >= self.chrono:
            self.chrono += self.increment
            return True
        return False

    def main(self) -> None:
        """
        Main loop of the stats handler; it's the only thing that needs to be called once it
        has been initialised.
        :return: None
        """
        if self.timer():
            self.display()
For GridButton, this parameter is always injected in is_clicked", "StateButton, SystemButton, Background]] = dict() # It's a bit ugly doing it like", "clicking events, will recursively pass down click events to child Gui if one", "None: temp = grid_obj.end grid_obj.end = None temp.is_end = False cst.dirty_fills.append(temp.get_fill()) pathfinder_obj.search_is_init =", "from the main Gui, on event: main_Gui.objects.append(pop_up(\"hello\")) :param announcement: Text to be displayed", "to one of the lists in cst.to_display (see constants.py module), for special cases", "func=ok_func) background = Background(cst.DARK_GREY, pg.Rect(((cfg.window.get_width() - bg_width) / 2, (cfg.window.get_height() - bg_height) /", "of the main_gui if display_steps if False, and display the buttons the show", "classes to use) :param kwargs: add attributes to the gui, used for dependency", "the program (Singleton) (class not typed to avoid import) :return: StatsHandler object \"\"\"", "func(event, *args) self.events.clear() def handle_input(self, event: pg.event.Event): \"\"\" Process Keyboard user input, the", "on all of its objects. :param attributes: Call the display() method on additional", "Parent/root Gui :param child: Child Gui to remove from parent :return: None \"\"\"", "main_gui[\"grid_n_wide_button\"].rect.bottom + 10), 40, \"Nodes in height: \", func=generate) main_gui[\"brush_size_button\"] = TextInputButton({\"min\": 1,", "Stat object in self.stats. :return: None \"\"\" self.background.display() for stat in self.stats: stat.display()", "GridButton((15, main_gui[\"draw_walls_button\"].rect.bottom + 10), \"Random walls\", func=random_walls) # algo buttons algo_buttons = [AlgoButton((0,", "cfg.stats_background_rect.y + 35), get_neighbor_dt), rsr_prep_time=Stat(\"RSR Preprocess (ms): \", cst.BLACK, (cfg.stats_background_rect.x + 15, cfg.stats_background_rect.y", ":return: None \"\"\" try: kwargs[\"root\"].objects.remove(kwargs[\"child\"]) kwargs[\"root\"].draw_all(*attributes) except KeyError: pass class Gui: text_input =", "remove_from_root(*attributes, **kwargs) -> None: \"\"\" Removes the popup_gui from the root/parent Gui. Disable", "False, apply_rsr_func) main_gui[\"display_moves_button\"] = Checkbox(\"Display moves\", (15, main_gui[\"apply_rsr_button\"].rect.bottom + 10), True, disp_moves_func) main_gui[\"run_interval_button\"]", "to the Gui as \"attribute\": object. 

class Gui:
    text_input = ""

    def __init__(self, dict_object: Dict[str, Any], **kwargs: Any) -> None:
        """ Creates a gui window or screen. If a Gui object is added to its objects, it will pass
        down its click and input events to it recursively until used.
        Specific kwargs:
        - external=True Allows clicks outside the Gui's objects
        - ext_close=True Removes Gui from parent Gui on external clicks
        :param dict_object: all objects to be added to the Gui as "attribute": object.
            All objects in dict_object MUST have a display() method and can have an is_clicked()
            method to handle clicks on Buttons (see classes.py for prefabricated classes to use)
        :param kwargs: add attributes to the gui, used for dependency injection.
        """
        self.objects = []
        self.events = []
        for name, obj in dict_object.items():
            setattr(self, name, obj)
            setattr(self, f"{obj.__class__}_group", [])
            self.objects.append(obj)
        for obj in self.objects:
            self.__dict__[f"{obj.__class__}_group"].append(obj)
        self.__dict__.update(kwargs)

    def draw_all(self, *attributes: str) -> None:
        """ Call display() on all of its objects.
        :param attributes: Call the display() method on additional attributes of the gui
        :return: None
        """
        for obj in self.objects:
            obj.display()
        for key in attributes:
            self.__dict__[key].display()

    def handle_events(self, *additional: Any) -> None:
        """ Handle click and keyboard input events by redistributing them to the handle_clicks or
        handle_input methods.
        TYPING: *additional: (event.type, function(event, *args) -> None, *args), ...
        :param additional: Allows entering specific (event.type, function, (*args)) tuples to
            handle other events. The function will receive parameters (event, args).
            (I couldn't get the typing right...)
        :return: None
        """
        for event in self.events:
            if event.type == pg.MOUSEBUTTONDOWN:
                self.handle_clicks(pg.mouse.get_pos())
            elif event.type == pg.KEYDOWN:
                self.handle_input(event)
            for user_event, func, *args in additional:
                if event.type == user_event:
                    func(event, *args)
        self.events.clear()

    def handle_input(self, event: pg.event.Event) -> None:
        """ Process keyboard user input; the entered text is stored as a class attribute.
        A TextInputButton must be activated for this function to run; once the Enter key is
        pressed, its confirm_input(self.text_input) method will be called with the injected input.
        :param event: must be of type pg.KEYDOWN event
        :return: None
        """
        for button in self.objects:
            if button.__class__ is TextInputButton and button.is_activated:
                if event.key == pg.K_BACKSPACE:
                    Gui.text_input = Gui.text_input[:-1]
                    button.dict["value"] = Gui.text_input
                elif event.key == pg.K_RETURN:
                    button.confirm_input(Gui.text_input)
                    Gui.text_input = ""
                else:
                    Gui.text_input += event.unicode
                    button.dict["value"] = Gui.text_input
                button.display()

    def handle_clicks(self, mouse_pos: Tuple[int, int], root: 'Gui' = None) -> bool:
        """ Handle clicking events; will recursively pass down click events to a child Gui if one
        is in its objects (LIMITED TO ONE CHILD GUI). If any of its objects is clicked, the
        object's is_clicked() method will be called if it has one.
        :param mouse_pos: Coordinates of the cursor
        :param root: parent Gui; allows a child Gui to remove itself from its parent
        :return: True if click was used, else False
        """
        def check_priority() -> Union[DropDownButton, Gui]:
            """ Check if any of the Gui's objects require priority on clicks (currently only for
            DropDownButton and any child Gui that might be spawned during the program).
            :return: object with priority
            """
            priority_to = None
            for obj in self.objects:
                if isinstance(obj, Gui):
                    priority_to = obj
            return priority_to

        def handle_priority(priority_obj: Union[DropDownButton, Gui]) -> bool:
            """ If a priority object was found, look if any clicks affect it. Clicking outside of
            a DropDownButton's rect is allowed and clicks will be registered, but this is
            forbidden for a child Gui.
            :param priority_obj: The object with priority
            :return: True if click was used, else False
            """
            used = False
            if isinstance(priority_obj, Gui):
                # Inject parent Gui dependency as root, to allow the child Gui to remove itself
                # from the parent's objects when it is terminated
                used = priority_obj.handle_clicks(mouse_pos, root=self)
                try:
                    if not used and not priority_obj.external:
                        used = True
                    if not used and priority_obj.ext_close:
                        priority_obj.src_butt.is_activated = False
                        remove_from_root(root=self, child=priority_obj)
                except AttributeError:
                    pass
            return used

        click = pg.Rect(mouse_pos, (1, 1))
        click_used = False
        prio = check_priority()
        if prio:
            click_used = handle_priority(prio)
        # Clicking outside the child-most Gui
        if not click_used:
            for button in self.objects:
                try:
                    if not button.is_disabled:
                        if click.colliderect(button.rect):
                            click_used = True
                            button.is_clicked(gui=self, root=root)
                    elif button.is_activated and button.__class__ is TextInputButton:
                        button.confirm_input(self.text_input)
                except AttributeError:
                    pass
        return click_used
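
# Illustrative sketch (not part of the original module): the dict/kwargs construction the
# Gui class expects. Keys become attributes, every value needs a display() method, and
# extra kwargs (here a hypothetical "pathfinder") are injected as attributes.
def _example_build_gui(pathfinder_obj: Any) -> 'Gui':
    demo_bg = Background(cst.LIGHT_GREY, pg.Rect(0, 0, 200, 100))
    demo_butt = Button((10, 10), "Demo")
    return Gui({"demo_bg": demo_bg, "demo_butt": demo_butt}, pathfinder=pathfinder_obj)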

# TODO: a lot of setter functions could be shifted into a get_data(gui) method by the pathfinder
#  to unclutter this module and remove some LOC...
def init_gui(pathfinder_obj: Any, grid_obj: Grid) -> Gui:
    """ Initialise the main Gui for the visualizer module; most dependency issues are fixed by
    injecting the necessary objects as arguments.
    First, define the necessary functions for all buttons that will be added to the Gui.
    Second, create a dict of all the objects to be added to the Gui as "attribute": object.
    The dict is defined one line at a time because all placements depend on the previous ones.
    Last, we create the Gui from the dict, with pathfinder and grid added as kwargs.
    :param pathfinder_obj: Pathfinder object to link to the Gui (class is not typed to avoid import)
    :param grid_obj: Grid object to link to the Gui
    :return: Gui object
    """
    # Button functions for particular cases:
    def random_walls(self: GridButton) -> None:
        """ On click of this button, roughly 10% (1 in 11) of the nodes in the grid will become
        walls.
        :param self: random_walls button object. For GridButton, this parameter is always
            injected in is_clicked
        :return: None
        """
        self.is_activated = False
        for column in grid_obj.all_nodes:
            for node in column:
                if randrange(11) == 0:
                    if node is not grid_obj.start and node is not grid_obj.end:
                        node.status |= Node.WALL
                        cst.dirty_fills.append(node.get_fill())

    def disp_moves_func(arg: bool) -> None:
        """ Function for the display moves Checkbox. Switches the bool of the
        pathfinder.display_steps attribute. Disables the run interval and wait time input buttons
        of the main_gui if display_steps is False, and redisplays the buttons to show the change.
        :param arg: display_moves_button.is_activated. For Checkboxes this parameter is always
            injected in is_clicked
        :return: None
        """
        pathfinder_obj.display = arg
        # if "wait_time_button" in gui.__dict__.keys() and "run_interval_button" in gui.__dict__.keys():
        try:
            main_gui_handler.wait_time_button.is_disabled = not arg
            main_gui_handler.run_interval_button.is_disabled = not arg
            main_gui_handler.wait_time_button.display()
            main_gui_handler.run_interval_button.display()
        except KeyError:
            pass

    def diago_func(arg: bool) -> None:
        """ Function for the diago_allowed Checkbox. Switches the bool of the pathfinder.diago
        attribute.
        :param arg: diago_button.is_activated. For Checkboxes this parameter is always injected
            in is_clicked
        :return: None
        """
        pathfinder_obj.diago = arg

    def apply_rsr_func(arg: bool) -> None:
        """ Function for the apply_rsr Checkbox. Switches the bool of the pathfinder.apply_rsr
        attribute.
        :param arg: apply_rsr_button.is_activated. For Checkboxes this parameter is always
            injected in is_clicked
        :return: None
        """
        pathfinder_obj.apply_rsr = arg

    def set_algo(self: AlgoButton) -> None:
        """ Set the pathfinder.algo attribute to the algorithm associated with the AlgoButton.
        :param self: inject reference to self. For AlgoButton, this parameter is always injected
            in is_clicked
        :return: None
        """
        pathfinder_obj.algo = self.algo
        main_gui_handler.dropdown_algo.is_activated = False
        remove_from_root(root=main_gui_handler, child=algo_gui)

    def generate() -> None:
        """ Calls the generate method of the grid object, and injects the n_wide and n_high
        dependencies from the main_gui's grid_n_wide and grid_n_high TextInputButtons' values.
        :return: None
        """
        grid_obj.generate(main_gui_handler.grid_n_wide_button.dict["value"],
                          main_gui_handler.grid_n_high_button.dict["value"])

    def play_pause(arg: bool = None) -> None:
        """ Switches the pathfinder.running attribute on and off on every press, if conditions
        are met. Calls the pathfinder.init_search method if pathfinder.search_is_init is False.
        Disables Buttons that cannot be used during pathfinding.
        :param arg: Not used, is included for the functions of other StateButtons
        :return: None
        """
        def disable_buttons() -> None:
            """ Disable Buttons that cannot be used during pathfinding.
            :return: None
            """
            for obj in main_gui_handler.objects:
                if obj.__class__ is not StateButton and obj is not main_gui_handler.exit_button:
                    try:
                        obj.is_disabled = True
                    except AttributeError:
                        continue
                    obj.display()

        def check_conditions() -> bool:
            """ Check that an algorithm is defined and that the grid has a starting node and an
            ending node. If no end or start node is defined, adds a popup Gui to the main_gui.
            :return: True if pathfinder is ready to run
            """
            if pathfinder_obj.algo:
                if grid_obj.start:
                    if grid_obj.end:
                        return True
                    else:
                        pg.event.post(pg.event.Event(cst.NO_END, announcement="No end Node!"))
                else:
                    pg.event.post(pg.event.Event(cst.NO_START, announcement="No start Node!"))
            return False

        if check_conditions():
            pathfinder_obj.running = not pathfinder_obj.running
            # could add pause/unpause timers...
            if not pathfinder_obj.search_is_init:
                disable_buttons()
                if not pathfinder_obj.display or main_gui_handler.run_interval_button.dict["value"] == -1:
                    # update display to show disabled buttons before pathfinder starts looping
                    handle_display()
                    pg.display.flip()
                pathfinder_obj.init_search()
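
    # Illustrative sketch (not part of the original module): the argument-injection contract
    # used by the callbacks above. A Checkbox calls func(self.is_activated) from is_clicked,
    # so any one-parameter function can be wired in the same way:
    def _example_logging_callback(arg: bool) -> None:
        print(f"checkbox toggled: {arg}")
    # e.g. Checkbox("Diagonal moves (logged)", (15, 500), False, _example_logging_callback)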

    def reset(partial: bool = False) -> None:
        """ Stops the pathfinder and resets all necessary attributes; also resets the grid.
        If partial, leaves walls, start and end nodes in place instead of fully resetting the
        grid (used when only resetting the search).
        :return: None
        """
        pathfinder_obj.running = False
        if not partial:
            if grid_obj.start is not None:
                temp = grid_obj.start
                grid_obj.start = None
                temp.is_start = False
                cst.dirty_fills.append(temp.get_fill())
            if grid_obj.end is not None:
                temp = grid_obj.end
                grid_obj.end = None
                temp.is_end = False
                cst.dirty_fills.append(temp.get_fill())
        pathfinder_obj.search_is_init = False
        pathfinder_obj.dijkstra_cost_so_far = 0
        pathfinder_obj.path_found = False
        pathfinder_obj.frontier = []
        pathfinder_obj.queue.clear()
        pathfinder_obj.to_be_removed = []
        pathfinder_obj.shortest_path = []
        pathfinder_obj.run_timer = 0
        pathfinder_obj.start_time = 0
        pathfinder_obj.end_time = 0
        pathfinder_obj.neighbors_prep_dt = 0
        pathfinder_obj.rsr_prep_dt = 0
        pathfinder_obj.algo_dt = 0
        for column in grid_obj.all_nodes:
            for node in column:
                node.neighbors = None
                node.came_from = None
                if node.update_color() is not cst.BLACK:
                    if not partial:
                        node.status &= ~(Node.WALL | Node.END | Node.START)
                    node.status &= ~(Node.SYM_RECT | Node.BORDER | Node.VISITED | Node.PATH)
                    cst.dirty_fills.append(node.get_fill())
        for obj in main_gui_handler.objects:
            try:
                obj.is_disabled = False
                obj.display()
            except AttributeError:
                # (Backgrounds)
                continue
        main_gui_handler.wait_time_button.is_disabled = \
            main_gui_handler.run_interval_button.is_disabled = \
            not main_gui_handler.display_moves_button.is_activated
        # TODO: try resetting the focus to pygame

    def save() -> None:
        """ Save the Grid object as a Pickle file in the Grids folder (or other).
        :return: None
        """
        tkinter.Tk().withdraw()
        direct = filedialog.asksaveasfilename(initialdir=grid_path, defaultextension=".pickle")
        if direct:
            save_object = {"start": grid_obj.start, "end": grid_obj.end, "grid": grid_obj.all_nodes}
            with open(direct, "wb") as file:
                dump(save_object, file)

    def load_grid() -> None:
        """ Load a grid object from the Grids folder (or other), update values, scale the grid
        and show all changes.
        :return: None
        """
        def scale_and_draw() -> None:
            """ Scale the grid object to fit the current screen size as well as possible; might
            make the grid go out of borders.
            :return: None
            """
            nodes_width = grid_obj.width / main_gui_handler.grid_n_wide_button.dict["value"]
            nodes_height = grid_obj.height / main_gui_handler.grid_n_high_button.dict["value"]
            start_height = 25
            # Subtracting the first because it will be incremented during the loop
            position_y = start_height - nodes_height
            position_x = cfg.button_background_rect.width - nodes_width
            for column in grid_obj.all_nodes:
                position_x += nodes_width
                for node in column:
                    position_y += nodes_height
                    node.height = nodes_height
                    node.width = nodes_width
                    node.position = (position_x, position_y)
                    node.rect = pg.rect.Rect(node.position, (node.width, node.height))
                position_y = start_height - nodes_height
            grid_obj.display()

        def update_values(save_object: dict) -> None:
            """ Updates the attributes of the grid object and the values of the grid_n_wide and
            grid_n_high buttons, and displays the changes.
            :param save_object: save object loaded from pickle file
            :return: None
            """
            grid_obj.all_nodes = save_object["grid"]
            grid_obj.start = save_object["start"]
            grid_obj.end = save_object["end"]

        tkinter.Tk().withdraw()
        direct = filedialog.askopenfilename(initialdir=grid_path)
        if direct:
            with open(direct, "rb") as file:
                save_object_ = load(file)
            update_values(save_object_)
            scale_and_draw()

    def exit_func() -> None:
        """ Exit program.
        :return: None
        """
        pg.event.post(pg.event.Event(pg.QUIT))
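
    # Illustrative sketch (not part of the original module): the pickle round-trip performed
    # by save() and load_grid(), reduced to its essentials; "path" is hypothetical.
    def _example_pickle_roundtrip(path: str) -> dict:
        save_object = {"start": grid_obj.start, "end": grid_obj.end, "grid": grid_obj.all_nodes}
        with open(path, "wb") as file:
            dump(save_object, file)      # serialize the grid dict
        with open(path, "rb") as file:
            return load(file)            # deserialize it back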
\"\"\" self.objects = [] self.events = []", "# (Backgrounds) continue main_gui_handler.run_interval_button.is_disabled = \\ main_gui_handler.run_interval_button.is_disabled = \\ not main_gui_handler.display_moves_button.is_activated # TODO:", "for obj in self.objects: obj.display() for key in attributes: self.__dict__[key].display() def handle_events(self, *additional:", "blits to the window then clear channel, fills are made before blits. All", ":param announcement: Text to be displayed on the popup window :return: A Gui", "2), (bg_width, bg_height)), text_obj) popup = Gui({\"popup_bg\": background, \"ok_butt\": ok_button}) popup.draw_all() return popup", "for DropDownButton and any child Gui that might be spawned during the program", "Allows entering specific (event.type, function, (*args)) tuples to handle other events. The function", "the shortest path found by the pathfinder\"\"\" return len(pathfinder.shortest_path) def get_fps() -> float:", "main_gui_handler.exit_button: try: obj.is_disabled = True except AttributeError: continue obj.display() def check_conditions() -> bool:", "\"\"\" def check_priority() -> Union[DropDownButton, Gui]: \"\"\" Check if any of the Gui's", "(class is not typed to avoid import) :param grid_obj: Grid object to link", "grid_obj.start grid_obj.start = None temp.is_start = False cst.dirty_fills.append(temp.get_fill()) if grid_obj.end is not None:", "\"\"\" tkinter.Tk().withdraw() direct = filedialog.asksaveasfilename(initialdir=grid_path, defaultextension=\".pickle\") if direct: save_object = {\"start\": grid_obj.start, \"end\":", "creating GUI ##################################################################################################### main_gui: Dict[str, Union[pg.Rect, GridButton, AlgoButton, Checkbox, DropDownButton, TextInputButton, StateButton, SystemButton,", "\"Exit\", exit_func) main_gui_handler = Gui(main_gui, pathfinder=pathfinder_obj, grid=grid_obj) return main_gui_handler def handle_display() -> None:", "= Background(cst.DARK_GREY, pg.Rect(((cfg.window.get_width() - bg_width) / 2, (cfg.window.get_height() - bg_height) / 2), (bg_width,", "Tuple[int, int], root: 'Gui' = None) -> bool: \"\"\" Handle clicking events, will", "False pathfinder_obj.path_found = False pathfinder_obj.frontier = [] pathfinder_obj.queue.clear() pathfinder_obj.to_be_removed = [] pathfinder_obj.shortest_path =", "important so the anti aliased text does not become opaque. :param background: Background", "to the gui, used for dependency injection. \"\"\" self.objects = [] self.events =", "clear channel, fills are made before blits. All the program's fill and blits", "child Gui :param priority_obj: The object with priority :return: True if click was", "Grid\", load_grid) main_gui[\"exit_button\"] = SystemButton((15, main_gui[\"load_grid_button\"].rect.bottom + 30), \"Exit\", exit_func) main_gui_handler = Gui(main_gui,", "the only way I know to keep reference to the previous entry. #", "[] for name, obj in dict_object.items(): setattr(self, name, obj) setattr(self, f\"{obj.__class__}_group\", []) self.objects.append(obj)", "if display_steps if False, and display the buttons the show change. :param arg:", "specific (event.type, function, (*args)) tuples to handle other events. 
The function will receive", ":return: None \"\"\" def disable_buttons() -> None: \"\"\" Disable Buttons that cannot be", "gui.__dict__.keys() and \"run_interval_button\" in gui.__dict__.keys(): try: main_gui_handler.wait_time_button.is_disabled = not arg main_gui_handler.run_interval_button.is_disabled = not", "kwargs: add attributes to the gui, used for dependency injection. \"\"\" self.objects =", "event: pg.event.Event): \"\"\" Process Keyboard user input, the entered text is stored as", "function, (*args)) tuples to handle other events. The function will receive parameters (event,", "scale_and_draw() def exit_func() -> None: \"\"\" Exit program. :return: None \"\"\" pg.event.post(pg.event.Event(pg.QUIT)) #", "(pop-ups) main_gui[\"button_background_rect\"] = Background(cst.LIGHT_GREY, cfg.button_background_rect) # grid placement buttons main_gui[\"start_node_button\"] = GridButton((15, 25),", "main_gui[\"reset_search_button\"] = StateButton((main_gui[\"reset_button\"].rect.right + 5, main_gui[\"reset_button\"].rect.top), \"Reset Search\", reset, True) main_gui[\"play_pause_button\"] = StateButton((15,", "/ main_gui_handler.grid_n_high_button.dict[\"value\"] start_height = 25 # Substracting the first because it will be", "attributes: self.__dict__[key].display() def handle_events(self, *additional: Any) -> None: \"\"\" Handle click and keyboard", "0 pathfinder_obj.algo_dt = 0 for column in grid_obj.all_nodes: for node in column: node.neighbors", "if it has one. :param mouse_pos: Coordinates of the cursor :param root: parent", "= Gui(main_gui, pathfinder=pathfinder_obj, grid=grid_obj) return main_gui_handler def handle_display() -> None: \"\"\" Does all", "First define the getter functions for the Stat objects Then Instantiate the Stat", "objects Then Instantiate the Stat object as kwargs for the StatsHandler :param pathfinder:", "0: if node is not grid_obj.start and node is not grid_obj.end: node.status |=", "used for dependency injection. \"\"\" self.objects = [] self.events = [] for name,", "nodes_width for column in grid_obj.all_nodes: position_x += nodes_width for node in column: position_y", "the Gui. Second, create a dict of all the objects to be added", "the grid from receiving input and redraw it to cover the popup :param", "between updates of the stats in ms :param kwargs: add stat objects as", "has one. :param mouse_pos: Coordinates of the cursor :param root: parent Gui, allows", "in gui.__dict__.keys(): try: main_gui_handler.wait_time_button.is_disabled = not arg main_gui_handler.run_interval_button.is_disabled = not arg main_gui_handler.wait_time_button.display() main_gui_handler.run_interval_button.display()", "(currently only for DropDownButton and any child Gui that might be spawned during", "start/end nodes It also creates all buttons by the init_gui function \"\"\" from", "called if it has one. :param mouse_pos: Coordinates of the cursor :param root:", "grid_obj.width / main_gui_handler.grid_n_wide_button.dict[\"value\"] nodes_height = grid_obj.height / main_gui_handler.grid_n_high_button.dict[\"value\"] start_height = 25 # Substracting", "True if pathfinder is ready to run \"\"\" if pathfinder_obj.algo: if grid_obj.start: if", "algo_buttons = [AlgoButton((0, 0), \"Flood Fill\", \"bfs\", active_color=cst.BLACK, rounded=False, func=set_algo), AlgoButton((0, 0), \"A*\",", "appended to one of the lists in cst.to_display (see constants.py module), for special", "-> None: \"\"\" Function for the diago_allowed Checkbox. 
Switches the bool of pathfinder.diago", "grid_obj.height / main_gui_handler.grid_n_high_button.dict[\"value\"] start_height = 25 # Substracting the first because it will", "a class attribute. A TextInputButton must be activated for this function to run,", "End\") main_gui[\"draw_walls_button\"] = GridButton((15, main_gui[\"start_node_button\"].rect.bottom + 10), \"Draw walls\") main_gui[\"erase_walls_button\"] = GridButton((main_gui[\"draw_walls_button\"].rect.right +", "reset(partial: bool = False) -> None: \"\"\" Stops the pathfinder, and reset all", "= Gui.text_input[:-1] button.dict[\"value\"] = Gui.text_input elif event.key == pg.K_RETURN: button.confirm_input(Gui.text_input) Gui.text_input = \"\"", "obj.__class__ is not StateButton and obj is not main_gui_handler.exit_button: try: obj.is_disabled = True", "constants as cst from classes import * folder_path = getcwd() grid_path = join(folder_path,", "cst.BLACK: if not partial: node.status &= ~(Node.WALL | Node.END | Node.START) node.status &=", "priority_obj.ext_close: priority_obj.src_butt.is_activated = False remove_from_root(root=self, child=priority_obj) except AttributeError: pass return used click =", "to be able to use it elsewhere (pop-ups) main_gui[\"button_background_rect\"] = Background(cst.LIGHT_GREY, cfg.button_background_rect) #", "dependencies from the main_gui's grid_n_wide and grid_n_high TextInputButtons' values :return: None \"\"\" grid_obj.generate(main_gui_handler.grid_n_wide_button.dict[\"value\"],", "not main_gui_handler.exit_button: try: obj.is_disabled = True except AttributeError: continue obj.display() def check_conditions() ->", "is included for the functions of other StateButtons :return: None \"\"\" def disable_buttons()", "\"\"\" Calls the generate method of the grid object, and injects the n_wide", "= \"\" else: Gui.text_input += event.unicode button.dict[\"value\"] = Gui.text_input button.display() def handle_clicks(self, mouse_pos:", "None \"\"\" tkinter.Tk().withdraw() direct = filedialog.asksaveasfilename(initialdir=grid_path, defaultextension=\".pickle\") if direct: save_object = {\"start\": grid_obj.start,", "35), get_neighbor_dt), rsr_prep_time=Stat(\"RSR Preprocess (ms): \", cst.BLACK, (cfg.stats_background_rect.x + 15, cfg.stats_background_rect.y + 55),", "Any) -> None: \"\"\" Handle click and keyboard input events by redistributing to", "pg.Rect(mouse_pos, (1, 1)) click_used = False prio = check_priority() if prio: click_used =", "necessary functions for all buttons that will be added to the Gui. Second,", "0, \"value\": 0}, (15, main_gui[\"display_moves_button\"].rect.bottom + 10), 40, \"Run: \") main_gui[\"wait_time_button\"] = TextInputButton({\"min\":", "grid_obj.end: return True else: pg.event.post(pg.event.Event(cst.NO_END, announcement=\"No end Node!\")) else: pg.event.post(pg.event.Event(cst.NO_START, announcement=\"No start Node!\"))", "disable_buttons() -> None: \"\"\" Disable Buttons that cannot be used during pathfinding :return:", "the lenght of the shortest path found by the pathfinder\"\"\" return len(pathfinder.shortest_path) def", "def disp_moves_func(arg: bool) -> None: \"\"\" Function for the display moves Checkbox. 
Switches", "return len(pathfinder.shortest_path) def get_fps() -> float: \"\"\" Get the fps of the program\"\"\"", "= None temp.is_start = False cst.dirty_fills.append(temp.get_fill()) if grid_obj.end is not None: temp =", "= grid_obj.end grid_obj.end = None temp.is_end = False cst.dirty_fills.append(temp.get_fill()) pathfinder_obj.search_is_init = False pathfinder_obj.dijkstra_cost_so_far", "handle_input(self, event: pg.event.Event): \"\"\" Process Keyboard user input, the entered text is stored", "always injected in is_clicked :return: None \"\"\" self.is_activated = False for column in", "this # module and remove some LOC... def init_gui(pathfinder_obj: Any, grid_obj: Grid) ->", "of other StateButtons :return: None \"\"\" def disable_buttons() -> None: \"\"\" Disable Buttons", "grid_obj.end is not None: temp = grid_obj.end grid_obj.end = None temp.is_end = False", "self.__dict__.values() if obj.__class__ is Stat] self.background = background self.chrono = 0 self.increment =", ":param kwargs: See the necessary kwargs above :return: None \"\"\" try: kwargs[\"root\"].objects.remove(kwargs[\"child\"]) kwargs[\"root\"].draw_all(*attributes)", "5, main_gui[\"save_grid_button\"].rect.top), \"Load Grid\", load_grid) main_gui[\"exit_button\"] = SystemButton((15, main_gui[\"load_grid_button\"].rect.bottom + 30), \"Exit\", exit_func)", "0), \"OK\") ok_button = OkButton(((cfg.window.get_width() - dimension_butt.rect.w) / 2, (cfg.window.get_height() - dimension_butt.rect.h) /", "- 'child': Child Gui to remove from Parent :param attributes: Used to specify", "input and redraw it to cover the popup necessary kwargs: - 'root': Parent/root", "-> None: \"\"\" Save the Grid object as a Pickle file in the", "and priority_obj.ext_close: priority_obj.src_butt.is_activated = False remove_from_root(root=self, child=priority_obj) except AttributeError: pass return used click", "+ 10), 40, \"Run: \") main_gui[\"wait_time_button\"] = TextInputButton({\"min\": 0, \"max\": 9999, \"default\": 0,", "does not become opaque. :param background: Background object where the stats will be", "object to be able to use it elsewhere (pop-ups) main_gui[\"button_background_rect\"] = Background(cst.LIGHT_GREY, cfg.button_background_rect)", "in gui.__dict__.keys() and \"run_interval_button\" in gui.__dict__.keys(): try: main_gui_handler.wait_time_button.is_disabled = not arg main_gui_handler.run_interval_button.is_disabled =", "of setter functions could be shifted into a get_data(gui) method by the pathfinder", "used and not priority_obj.external: used = True if not used and priority_obj.ext_close: priority_obj.src_butt.is_activated", "pickle file :return: None \"\"\" grid_obj.all_nodes = save_object[\"grid\"] grid_obj.start = save_object[\"start\"] grid_obj.end =", "typing right...) :return: None \"\"\" for event in self.events: if event.type == pg.MOUSEBUTTONDOWN:", "Any], **kwargs: Any) -> None: \"\"\" Creates a gui window or screen, if", "reset all necessary attributes, also reset the grid. 
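
# Illustrative sketch (not part of the original module): extending the main_gui dict with
# one more button before Gui() is built, chaining its placement off the previous entry as
# init_gui() does throughout; "help_button" is hypothetical:
#
#     main_gui["help_button"] = SystemButton(
#         (15, main_gui["exit_button"].rect.bottom + 10), "Help", lambda: None)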

def handle_display() -> None:
    """ Does all the fills and blits to the window, then clears each channel; fills are made
    before blits. All the program's fill and blit orders are appended to one of the lists in
    cst.to_display (see the constants.py module); for special cases there is an early and a late
    channel.
    :return: None
    """
    for group in cst.to_display:
        for i, j in group:
            if i.__class__ is pg.Surface:
                cfg.window.blit(i, j)
            else:
                cfg.window.fill(i, j)
        group.clear()


def pop_up(announcement: str) -> Gui:
    """ Creates a Pop-up window Gui with a single OK button to dismiss the message and remove the
    popup from its parent. Use as follows: from the main Gui, on event:
    main_Gui.objects.append(pop_up("hello"))
    :param announcement: Text to be displayed on the popup window
    :return: A Gui object
    """
    def ok_func(root: Gui, child: Gui) -> None:
        """ Removes the popup from the root/parent Gui; disables the grid from receiving input
        and redraws it to cover the popup.
        :param root: Parent/root Gui
        :param child: Child Gui to remove from parent
        :return: None
        """
        root.grid.disabled = pg.time.get_ticks() + 100
        remove_from_root("grid", root=root, child=child)

    text_surf = cst.big_text_font.render(announcement, True, cst.RED)
    bg_width = 2 * text_surf.get_width()
    bg_height = 4 * text_surf.get_height()
    text_obj = (text_surf, ((bg_width - text_surf.get_width()) / 2,
                            (bg_height - text_surf.get_height()) / 3))
    dimension_butt = Button((0, 0), "OK")
    ok_button = OkButton(((cfg.window.get_width() - dimension_butt.rect.w) / 2,
                          (cfg.window.get_height() - dimension_butt.rect.h) / 2 + 100 / 4),
                         "OK", func=ok_func)
    background = Background(cst.DARK_GREY,
                            pg.Rect(((cfg.window.get_width() - bg_width) / 2,
                                     (cfg.window.get_height() - bg_height) / 2),
                                    (bg_width, bg_height)),
                            text_obj)
    popup = Gui({"popup_bg": background, "ok_butt": ok_button})
    popup.draw_all()
    return popup
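
# Illustrative sketch (not part of the original module): how drawing orders reach
# handle_display(). Producers append (color_or_surface, rect) pairs to one of the lists in
# cst.to_display; cst.dirty_fills is the channel the grid code above already uses.
def _example_queue_node_redraw(node: Node) -> None:
    cst.dirty_fills.append(node.get_fill())  # consumed as a window.fill() on the next pass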
Disables the run interval and wait time buttons of the main_gui when display_steps is False, and redisplays them to show the change.
:param background: Background object where the stats will be displayed (positioning", "pathfinder_obj.end_time = 0 pathfinder_obj.neighbors_prep_dt = 0 pathfinder_obj.rsr_prep_dt = 0 pathfinder_obj.algo_dt = 0 for", "pg.time.get_ticks() >= self.chrono: self.chrono += self.increment return True return False def main(self) ->", "buttons, using input buttons, displaying stats, popups and setting start/end nodes It also", "float: \"\"\" Get the time taken for preprocessing the grid's nodes' neighbors from", "= \"\" def __init__(self, dict_object: Dict[str, Any], **kwargs: Any) -> None: \"\"\" Creates", "root.grid.disabled = pg.time.get_ticks() + 100 remove_from_root(\"grid\", root=root, child=child) text_surf = cst.big_text_font.render(announcement, True, cst.RED)", "\"Erase walls\") main_gui[\"random_walls_button\"] = GridButton((15, main_gui[\"draw_walls_button\"].rect.bottom + 10), \"Random walls\", func=random_walls) # algo", "15), get_fps), path_length=Stat(\"Path length: \", cst.BLACK, (cfg.stats_background_rect.x + 300, cfg.stats_background_rect.y + 35), get_path_len))", "reference to self. For AlgoButton, this parameter is always injected in is_clicked :return:", "True, cst.RED) bg_width = 2 * text_surf.get_width() bg_height = 4 * text_surf.get_height() text_obj", "display the buttons the show change. :param arg: display_moves_button.is_activated, For Checkboxes this parameter", "(node.width, node.height)) position_y = start_height - nodes_height grid_obj.display() def update_values(save_object: dict) -> None:", "... :param additional: Allows entering specific (event.type, function, (*args)) tuples to handle other", "loop position_y = start_height - nodes_height position_x = cfg.button_background_rect.width - nodes_width for column", "(bg_width, bg_height)), text_obj) popup = Gui({\"popup_bg\": background, \"ok_butt\": ok_button}) popup.draw_all() return popup class", "Rectangular Symmetry Reduction from the pathfinder\"\"\" return round(pathfinder.rsr_prep_dt, 2) def get_path_len() -> float:", "visualizer. It handles the function for clicking on buttons, using input buttons, displaying", "the grid has a starting node and an ending node. If no end", "fill and blits orders are appended to one of the lists in cst.to_display", "end Node!\")) else: pg.event.post(pg.event.Event(cst.NO_START, announcement=\"No start Node!\")) return False if check_conditions(): pathfinder_obj.running =", "dict_object.items(): setattr(self, name, obj) setattr(self, f\"{obj.__class__}_group\", []) self.objects.append(obj) for obj in self.objects: self.__dict__[f\"{obj.__class__}_group\"].append(obj)", "Start\") main_gui[\"end_node_button\"] = GridButton((main_gui[\"start_node_button\"].rect.right + 5, main_gui[\"start_node_button\"].rect.top), \"Place End\") main_gui[\"draw_walls_button\"] = GridButton((15, main_gui[\"start_node_button\"].rect.bottom", "pathfinder.algo attribute to the algorithm associated with the AlgoButton :param self: inject reference", "not pathfinder_obj.search_is_init: disable_buttons() if not pathfinder_obj.display or main_gui_handler.run_interval_button.dict[\"value\"] == -1: # update display", "if it's time to display \"\"\" if pg.time.get_ticks() >= self.chrono: self.chrono += self.increment", "bool: \"\"\" If priority object was found look if any clicks affect it,", "from classes import * folder_path = getcwd() grid_path = join(folder_path, \"Grids\") if not", "entered text is stored as a class attribute. 
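A condensed construction sketch following that contract; the names are taken from init_stats in this module, and the lambda getter is illustrative:

# One-Stat StatsHandler, per the constructor described above.
stat_handler = StatsHandler(
    background=Background(cst.LIGHT_GREY, cfg.stats_background_rect),
    increment=200,  # refresh the displayed stats every 200 ms
    fps_stat=Stat("FPS: ", cst.BLACK,
                  (cfg.stats_background_rect.x + 300,
                   cfg.stats_background_rect.y + 15),
                  lambda: round(cfg.clock.get_fps(), 1)),
)
stat_handler.main()  # the only call needed once initialised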
A TextInputButton must be activated for this function to run; once Enter is pressed, its confirm_input(self.text_input) method is called with the collected input.
def init_gui(pathfinder_obj: Any, grid_obj: Grid) -> Gui:
    """ Initialise the
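A hedged startup sketch; init_gui and init_stats are this module's factories, while the Grid() and Pathfinder(...) constructors are assumptions about the sibling modules:

grid = Grid()                                  # assumed constructor
pathfinder = Pathfinder(grid)                  # assumed constructor
main_gui_handler = init_gui(pathfinder, grid)  # builds every button
stat_handler = init_stats(pathfinder)          # builds the stats panel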
Disables Buttons that cannot be used while pathfinding is running.
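The disabling pass itself, reproduced as a sketch; it mirrors the nested disable_buttons helper in this module:

def disable_buttons() -> None:
    # Grey out everything except StateButtons and the exit button.
    for obj in main_gui_handler.objects:
        if obj.__class__ is not StateButton and obj is not main_gui_handler.exit_button:
            try:
                obj.is_disabled = True
            except AttributeError:  # Backgrounds have no is_disabled flag
                continue
            obj.display()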
:param arg: display_moves_button.is_activated. For Checkboxes this parameter is always injected in is_clicked
:param event: must be of type pg.KEYDOWN
Switches the pathfinder.apply_rsr boolean attribute.
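The callback body is a one-liner; a sketch mirroring the other Checkbox callbacks in this module:

def apply_rsr_func(arg: bool) -> None:
    # arg is the checkbox state, injected by Checkbox.is_clicked.
    pathfinder_obj.apply_rsr = arg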
Second, create a dict of all the objects to be added to the Gui, keyed as "attribute": object.
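A trimmed sketch of that dict-driven construction; the button classes and positions come from this module, but the kwarg names passed to Gui at the end are assumptions:

main_gui: Dict[str, Any] = dict()
main_gui["start_node_button"] = GridButton((15, 25), "Place Start")
main_gui["end_node_button"] = GridButton(
    (main_gui["start_node_button"].rect.right + 5,
     main_gui["start_node_button"].rect.top), "Place End")
# Last, build the Gui from the dict, injecting dependencies as kwargs
# (exact kwarg names assumed here).
main_gui_handler = Gui(main_gui, grid=grid_obj, pathfinder=pathfinder_obj)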
Specific kwargs:
- external=True: allows clicks outside the Gui's objects
- ext_close=True: removes the Gui from its parent Gui on external clicks
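The drop-down's child Gui uses both flags together, as in the algo_gui construction in this module:

algo_gui = Gui(
    {f"{button.algo}": button for button in algo_buttons},
    external=True,   # clicks outside its buttons are still accepted...
    ext_close=True,  # ...and an external click removes it from its parent
)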
First define the getter functions for the Stat objects, then instantiate each Stat as a kwarg for the StatsHandler.
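One getter/Stat pair following that recipe; get_algo_dt and the Stat arguments appear in init_stats in this module:

def get_algo_dt() -> float:
    """ Algorithm process time (ms), or time since processing started """
    return round(pathfinder.algo_dt, 2)

process_time = Stat("Process time (ms): ", cst.BLACK,
                    (cfg.stats_background_rect.x + 15,
                     cfg.stats_background_rect.y + 15),
                    get_algo_dt)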
:param mouse_pos: Coordinates of the cursor
Disables the grid from receiving input and redraws it to cover the popup.
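A sketch of that callback; the brief grid lock-out and the remove_from_root call mirror this module's ok_func:

def ok_func(root: Gui, child: Gui) -> None:
    # Ignore grid clicks briefly so the dismissing click can't draw walls.
    root.grid.disabled = pg.time.get_ticks() + 100
    remove_from_root("grid", root=root, child=child)  # redraws the grid too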
For AlgoButton, this parameter is always injected in is_clicked.
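A hedged reconstruction of set_algo; self.algo holds the algorithm key ("bfs", "astar", "dijkstra") each AlgoButton is built with, and the assignment below is an assumption consistent with the docstring:

def set_algo(self: AlgoButton) -> None:
    # self is the clicked AlgoButton, injected by is_clicked.
    pathfinder_obj.algo = self.algo  # assumed attribute assignment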
Use as follows: from the main Gui, on event: main_Gui.objects.append(pop_up(\"hello\"))", "and node is not grid_obj.end: node.status |= Node.WALL cst.dirty_fills.append(node.get_fill()) def disp_moves_func(arg: bool) ->", "# Inject parent Gui dependency as root, to allow the child Gui to", "init_stats(pathfinder: Any) -> StatsHandler: \"\"\" Initialise the StatsHandler object, with injected dependency to", "# Substracting the first because it will be incremented during the loop position_y", "Grid) -> Gui: \"\"\" Initialise the main Gui for the visualizer module, most", "25, \"default\": 100, \"value\": 100}, (15, main_gui[\"grid_n_wide_button\"].rect.bottom + 10), 40, \"Nodes in height:", "Disable the grid from receiving input and redraw it to cover the popup", "= load(file) update_values(save_object_) scale_and_draw() def exit_func() -> None: \"\"\" Exit program. :return: None", "child_gui=algo_gui) main_gui[\"diago_button\"] = Checkbox(\"Diagonal moves\", (15, main_gui[\"dropdown_algo\"].rect.bottom + 10), False, diago_func) main_gui[\"apply_rsr_button\"] =", "self.stats. :return: None \"\"\" self.background.display() for stat in self.stats: stat.display() def timer(self) ->", "for obj in main_gui_handler.objects: try: obj.is_disabled = False obj.display() except AttributeError: # (Backgrounds)", "-> None: \"\"\" Call display() method on all of its objects. :param attributes:", "button.is_activated and button.__class__ is TextInputButton: button.confirm_input(self.text_input) except AttributeError: pass return click_used # TODO:", "ugly doing it like this but it's the only way I know to", "the StatsHandler object, with injected dependency to the pathfinder to access stats values.", "time since it started processing\"\"\" return round(pathfinder.algo_dt, 2) def get_neighbor_dt() -> float: \"\"\"", "clicked, the object's is_clicked() method will be called if it has one. :param", "by the pathfinder to unclutter this # module and remove some LOC... def", "is_clicked :return: None \"\"\" self.is_activated = False for column in grid_obj.all_nodes: for node", "announcement=\"No start Node!\")) return False if check_conditions(): pathfinder_obj.running = not pathfinder_obj.running # could", "for the random walls button, 10% of the nodes in the grid will", ":return: None \"\"\" pathfinder_obj.display = arg # if \"wait_time_button\" in gui.__dict__.keys() and \"run_interval_button\"", "A TextInputButton must be activated for this function to run, once the Enter", "Switches the bool of pathfinder.diago attribute. :param arg: diago_button.is_activated, For Checkboxes this parameter", "program :return: object with priority \"\"\" priority_to = None for obj in self.objects:", "itself # from the parent's objects when it is terminated used = priority_obj.handle_clicks(mouse_pos,", "input, the entered text is stored as a class attribute. 
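The same pattern works from the event loop via handle_events' additional-handler tuples; the cst.NO_START and cst.NO_END events posted by check_conditions carry an announcement attribute (sketch):

main_gui_handler.handle_events(
    (cst.NO_START, lambda event: main_gui_handler.objects.append(
        pop_up(event.announcement))),
    (cst.NO_END, lambda event: main_gui_handler.objects.append(
        pop_up(event.announcement))),
)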
Switches the pathfinder.display_steps boolean attribute.
ngram row: hornet (Prolog embedded in Python) wing/segments/sections example script:

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 <NAME> <<EMAIL>>

__version__ = '0.2.5a'
__date__ = '2016-08-11'
__author__ = '<NAME> <<EMAIL>>'
__license__ = 'MIT'

import pprint

from hornet import *
from hornet.symbols import (
    side, left, right, wing, segment, segments, section, sections, point,
    Side, Id, S, Ss, W
)


def make_wing(db):
    db.tell(
        wing(Side, wing(side(Side), Ss)) <<
            segments(Side, Ss),
        segments(Side, segments(Ss)) <<
            findall(segment(Id, S), segment(Side, Id) & sections(Id, S), Ss),
        sections(Id, sections(Ss)) <<
            findall(section(S), section(Id, S), Ss),
        segment(left, 1),
        segment(left, 2),
        segment(right, 3),
        segment(right, 4),
        section(1, [point(1, 2), point(3, 4)]),
        section(1, [point(5, 6), point(7, 8)]),
        section(2, [point(2, 3), point(4, 5)]),
        section(2, [point(6, 7), point(8, 9)]),
        section(3, [point(11, 12), point(13, 14)]),
        section(3, [point(15, 16), point(17, 18)]),
        section(4, [point(12, 13), point(14, 15)]),
        section(4, [point(16, 17), point(18, 19)]),
    )


def ask_wing(db, side):
    for subst in db.ask(wing(side, W)):
        pprint.pprint(subst[W])


db = Database()
make_wing(db)
ask_wing(db, left)
ask_wing(db, right)
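Read as Prolog, the two findall clauses nest the data: sections/2 collects every section(S) fact for a segment id into a sections([...]) term, and segments/2 collects segment(Id, sections([...])) terms for a side into segments([...]). A query such as db.ask(wing(left, W)) should therefore bind W to a term of the shape wing(side(left), segments([segment(1, sections([...])), segment(2, sections([...]))])); ask_wing pretty-prints one such binding per solution, with the exact printed form depending on how hornet renders terms.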
ngram row: unittest module exercising random_util's generators:

import unittest

import random_util


class MyTestCase(unittest.TestCase):
    def test_visualize_results(self):
        column_width = 20
        print("Generated id:".rjust(column_width, ' ') + random_util.generate_id())
        print("Generated uuid:".rjust(column_width, ' ') + random_util.generate_uuid())
        print("Generated token:".rjust(column_width, ' ') + random_util.generate_token())


if __name__ == '__main__':
    unittest.main()
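The test above only prints the generated values, so it can never fail on content. If random_util's generators return non-empty strings, which the string concatenation implies but does not guarantee, a companion test along these lines could assert that much; the class and method names here are illustrative, not part of the original module:

import unittest

import random_util


class RandomUtilAssertions(unittest.TestCase):      # hypothetical companion test case
    def test_generators_return_values(self):
        for generate in (random_util.generate_id,
                         random_util.generate_uuid,
                         random_util.generate_token):
            value = generate()
            self.assertIsInstance(value, str)       # concatenation above implies str
            self.assertTrue(value)                  # and non-empty


if __name__ == '__main__':
    unittest.main()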
ngram row: accounts/migrations/0001_initial.py, an auto-generated Django migration creating a custom User model:

# Generated by Django 2.1.4 on 2019-02-18 14:37

import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('dn', models.CharField(blank=True, default='', max_length=254)),
                ('is_active', models.BooleanField(default=True, verbose_name='attivo')),
                ('email', models.EmailField(blank=True, max_length=254, null=True, verbose_name='email address')),
                ('matricola', models.CharField(blank=True, max_length=6, null=True, verbose_name='Matricola/Codice di identificazione')),
                ('first_name', models.CharField(blank=True, max_length=30, null=True, verbose_name='Nome')),
                ('last_name', models.CharField(blank=True, max_length=30, null=True, verbose_name='Cognome')),
                ('codice_fiscale', models.CharField(blank=True, max_length=16, null=True, verbose_name='Codice Fiscale')),
                ('gender', models.CharField(blank=True, choices=[('male', 'Maschio'), ('female', 'Femmina'), ('other', 'Altro')], max_length=12, null=True, verbose_name='Genere')),
                ('location', models.CharField(blank=True, max_length=30, null=True, verbose_name='Place of birth')),
                ('birth_date', models.DateField(blank=True, null=True, verbose_name='Date of birth')),
                ('access_notification', models.BooleanField(default=True, help_text='enable email send', verbose_name='Send Email notification accesses')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'ordering': ['username'],
                'verbose_name_plural': 'Utenti Unical ID',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
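The UserManager manager and the full set of auth-style fields suggest this User model is meant to replace Django's default user; if so, the project settings must point at it before this migration is first applied. The app label 'accounts' is inferred from the file path rather than visible in the fragments:

# settings.py (assumed)
AUTH_USER_MODEL = 'accounts.User'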
[ "self.name) logger.time_log('Testing Complete.\\n') if cv is not None: evaluator.evaluate_fold_scores(fold_scores) evaluator.evaluate_classifier_result( self.estimator, test_evaluation_frame, train=train_evaluation_frame,", "y_train, train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state ) for train_index, test_index", "verbose, fit_increment, warm_start, max_iters, random_state): \"\"\" This method allows for training to be", "BayesianSearchCV to perform hyperparameter tuning. Ensures everything is cleanly logged, evaluated, and pickled.", "runner to allow for easy and consistent model creation and evalutation \"\"\" __author__", "transformer=transformer, verbose=False) fold_predict_frame = EvaluationFrame(y_fold_test, y_fold_test_predict) fold_predict_proba_frame = None if record_predict_proba: y_fold_test_predict_proba =", "y_fold_test_predict_proba) return Evaluator.evaluate_classifier_fold(fold_predict_frame, fold_predict_proba_frame) class Runner: \"\"\" The runner supports bare estimator fitting", "x_train[train_index], x_train[test_index] if hasattr(y_train, 'iloc'): y_fold_train, y_fold_test = y_train.iloc[train_index], y_train.iloc[test_index] else: y_fold_train, y_fold_test", "is not None: x_fold_train = transformer.transform(x_fold_train) estimator.fit(x_fold_train, y_fold_train) y_fold_test_predict = batch_predict(estimator, x_fold_test, transformer=transformer,", "shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') logger.time_log('Starting HyperParameter Search...') results = search.fit(x_train, y_train) logger.time_log('Search", "None: logger.time_log('Fitting Transformer...') transformer.fit(x_train) logger.time_log('Transformer Fit Complete.\\n') if sampling is not None: logger.time_log('Starting", "record_predict_proba: logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(results.best_estimator_, x_test) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba)", "y_fold_test_predict) fold_predict_proba_frame = None if record_predict_proba: y_fold_test_predict_proba = batch_predict_proba(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_proba_frame", "self.target = target self.estimator = estimator self.hyper_parameters = hyper_parameters self.trained_estimator = None def", "for iter in range(max_iters): x_fold_train, y_fold_train = shuffle(x_fold_train, y_fold_train, random_state=random_state) batch_fit_classifier(estimator, x_fold_train, y_fold_train,", "easy and consistent model creation and evalutation \"\"\" __author__ = \"<NAME>\" __email__ =", "of the a BayesianSearchCV to perform hyperparameter tuning. 
Ensures everything is cleanly logged,", "max_iters is not None: for iter in range(max_iters): x_fold_train, y_fold_train = shuffle(x_fold_train, y_fold_train,", "Training Partition...') y_train_predict = batch_predict(results.best_estimator_, x_train) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing", "scoring=scoring, return_train_score=True ) data_frame = self.df if sample is not None: data_frame =", "x_train_transformed = transformer.transform(x_train) self.estimator.fit(x_train_transformed, y_train) else: self.estimator.fit(x_train, y_train) logger.time_log('Training Complete.\\n') logger.time_log('Testing Training Partition...')", "transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(self.estimator, x_train, y_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer", "Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(self.estimator, x_test, transformer=transformer, verbose=verbose) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba)", "logger = Logger('%s.txt' % self.name) evaluator = Evaluator(logger) data_frame = self.df if sample", "train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(results.best_estimator_, x_test) logger.time_log('Testing Complete.\\n')", "Complete.\\n') if self.hyper_parameters is not None: self.estimator.set_params(**self.hyper_parameters.params) if cv is not None: kfold", "x_iter_train, y_iter_train = shuffle(x_train, y_train, random_state=random_state) batch_fit_classifier(self.estimator, x_iter_train, y_iter_train, transformer=transformer, increment=fit_increment, verbose=verbose) else:", "pandas as pd from collections import Counter from skopt import BayesSearchCV from sklearn.base", "y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') logger.time_log('Starting HyperParameter Search...') results = search.fit(x_train, y_train) logger.time_log('Search Complete.\\n')", "verbose=verbose) else: batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer is not", "Partition...') y_train_predict = batch_predict(self.estimator, x_train, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict)", "= \"Copyright 2019, <NAME>\" __license__ = \"Creative Commons Attribution-ShareAlike 4.0 International License\" __version__", "Partition...') y_test_predict = batch_predict(results.best_estimator_, x_test) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p' %", "in range(max_iters): x_iter_train, y_iter_train = shuffle(x_train, y_train, random_state=random_state) batch_fit_classifier(self.estimator, x_iter_train, y_iter_train, transformer=transformer, increment=fit_increment,", "logger.time_log('Transformer Fit Complete.\\n') if sampling is not None: logger.time_log('Starting Data Re-Sampling...') logger.log('Original Training", "__license__ = \"Creative Commons Attribution-ShareAlike 4.0 International License\" __version__ = \"1.0\" import pandas", "logger = Logger('%s.txt' % self.name) search = BayesSearchCV( self.estimator, self.hyper_parameters.search_space, 
n_jobs=n_jobs, n_iter=n_iter, cv=cv,", "self.estimator = estimator self.hyper_parameters = hyper_parameters self.trained_estimator = None def run_classification_experiment( self, sample=None,", "None: data_frame = data_frame.sample(n=sample, random_state=random_state) x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size)", "y_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer is not None: x_train_transformed = transformer.transform(x_train)", "This method allows for training to be done using the joblib parallelism in", "self.df = df self.target = target self.estimator = estimator self.hyper_parameters = hyper_parameters self.trained_estimator", "y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') if cv is not None: evaluator.evaluate_fold_scores(fold_scores) evaluator.evaluate_classifier_result(", "evaluator.evaluate_fold_scores(fold_scores) evaluator.evaluate_classifier_result( self.estimator, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() if self.hyper_parameters is not", "multiclass=multiclass ) logger.close() if self.hyper_parameters is not None: self.hyper_parameters.save('%s_params.p' % self.name) self.trained_estimator =", ") logger.close() if self.hyper_parameters is not None: self.hyper_parameters.save('%s_params.p' % self.name) self.trained_estimator = self.estimator", "batch_fit_classifier(self.estimator, x_train, y_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer is not None: x_train_transformed", "= shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') logger.time_log('Starting HyperParameter Search...') results = search.fit(x_train, y_train)", "everything is cleanly logged, evaluated, and pickled. \"\"\" def __init__( self, name, df,", "= EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') evaluator = Evaluator(logger) evaluator.evaluate_classifier_result( results,", "a cleaner form. \"\"\" if hasattr(x_train, 'iloc'): x_fold_train, x_fold_test = x_train.iloc[train_index], x_train.iloc[test_index] else:", "y_train_predict = batch_predict(results.best_estimator_, x_train) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...')", "= Parallel(n_jobs=n_jobs, verbose=3)( delayed(crossfold_classifier)( clone(self.estimator), transformer, x_train, y_train, train_index, test_index, record_predict_proba, verbose, fit_increment,", "fit_increment, warm_start, max_iters, random_state ) for train_index, test_index in kfold.split(x_train, y_train) ) logger.time_log('Cross", "y_train.iloc[test_index] else: y_fold_train, y_fold_test = y_train[train_index], y_train[test_index] if fit_increment is not None: if", ") logger.time_log('Cross Validation Complete.\\n') logger.time_log('Training Model...') if fit_increment is not None: if max_iters", "The runner supports bare estimator fitting and searvh-based fitting. 
By default it will", "search.fit(x_train, y_train) logger.time_log('Search Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(results.best_estimator_, x_train) logger.time_log('Testing Complete.\\n')", "y_new = sampling.fit_resample(x_train, y_train) logger.log('Balanced Training Shape is %s' % Counter(y_new)) if hasattr(x_train,", "if max_iters is not None: for iter in range(max_iters): x_fold_train, y_fold_train = shuffle(x_fold_train,", "batch_fit_classifier(self.estimator, x_iter_train, y_iter_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(self.estimator, x_train, y_train, transformer=transformer, increment=fit_increment, verbose=verbose)", "y_train[train_index], y_train[test_index] if fit_increment is not None: if max_iters is not None: for", "test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') if cv is not", "sample=None, random_state=None, test_size=0.20, n_jobs=-1, n_iter=2, cv=5, verbose=3, multiclass=False, record_predict_proba=False, sampling=None): use_project_path() logger =", "self.name) self.trained_estimator = self.estimator def run_classification_search_experiment( self, scoring, sample=None, random_state=None, test_size=0.20, n_jobs=-1, n_iter=2,", "Re-Sampling...') logger.log('Original Training Shape is %s' % Counter(y_train)) x_new, y_new = sampling.fit_resample(x_train, y_train)", "train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(self.estimator, x_test, transformer=transformer, verbose=verbose)", "n_jobs=-1): use_project_path() logger = Logger('%s.txt' % self.name) evaluator = Evaluator(logger) data_frame = self.df", "= batch_predict(results.best_estimator_, x_train) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict", "crossfold_classifier(estimator, transformer, x_train, y_train, train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state): \"\"\"", "random_state=random_state) logger.time_log('Cross Validating Model...') fold_scores = Parallel(n_jobs=n_jobs, verbose=3)( delayed(crossfold_classifier)( clone(self.estimator), transformer, x_train, y_train,", "Complete.\\n') logger.time_log('Training Model...') if fit_increment is not None: if max_iters is not None:", "Logger('%s.txt' % self.name) evaluator = Evaluator(logger) data_frame = self.df if sample is not", "= train_test_split(data_frame, data_frame[self.target], test_size=test_size) if transformer is not None: logger.time_log('Fitting Transformer...') transformer.fit(x_train) logger.time_log('Transformer", "columns=x_train.columns) x_train, y_train = x_new, y_new logger.time_log('Re-Sampling Complete.\\n') logger.time_log('Shuffling Re-Sampled Data.\\n') x_train, y_train", "= x_new, y_new logger.time_log('Re-Sampling Complete.\\n') logger.time_log('Shuffling Re-Sampled Data.\\n') x_train, y_train = shuffle(x_train, y_train,", "not None: data_frame = data_frame.sample(n=sample, random_state=random_state) x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target],", "transformer, x_train, y_train, train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters, 
random_state ) for", "verbose=verbose) else: if transformer is not None: x_train_transformed = transformer.transform(x_train) self.estimator.fit(x_train_transformed, y_train) else:", "import Parallel, delayed from sklearn.model_selection import StratifiedKFold, train_test_split from sklearn.utils import shuffle from", "= shuffle(x_fold_train, y_fold_train, random_state=random_state) batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(estimator, x_fold_train,", "x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size) if transformer is not None: logger.time_log('Fitting", "= \"<NAME>\" __email__ = \"<EMAIL>\" __copyright__ = \"Copyright 2019, <NAME>\" __license__ = \"Creative", "self.name) search = BayesSearchCV( self.estimator, self.hyper_parameters.search_space, n_jobs=n_jobs, n_iter=n_iter, cv=cv, verbose=verbose, scoring=scoring, return_train_score=True )", "class Runner: \"\"\" The runner supports bare estimator fitting and searvh-based fitting. By", "a BayesianSearchCV to perform hyperparameter tuning. Ensures everything is cleanly logged, evaluated, and", "test_evaluation_frame.save('%s_predict.p' % self.name) test_proba_evaluation_frame = None if record_predict_proba: logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba", "%s' % Counter(y_new)) if hasattr(x_train, 'columns'): x_new = pd.DataFrame(x_new, columns=x_train.columns) x_train, y_train =", "in range(max_iters): x_fold_train, y_fold_train = shuffle(x_fold_train, y_fold_train, random_state=random_state) batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment,", "x_fold_train, y_fold_train = shuffle(x_fold_train, y_fold_train, random_state=random_state) batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else:", "y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(results.best_estimator_, x_test) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test,", "skopt import BayesSearchCV from sklearn.base import clone from sklearn.externals.joblib import Parallel, delayed from", "x_train, y_train = shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') logger.time_log('Starting HyperParameter Search...') results =", "= x_train.iloc[train_index], x_train.iloc[test_index] else: x_fold_train, x_fold_test = x_train[train_index], x_train[test_index] if hasattr(y_train, 'iloc'): y_fold_train,", "x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else:", "x_train, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict", "y_fold_train, random_state=random_state) batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer,", "else: y_fold_train, y_fold_test = y_train[train_index], y_train[test_index] if fit_increment is not None: if max_iters", 
"collections import Counter from skopt import BayesSearchCV from sklearn.base import clone from sklearn.externals.joblib", "transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p' % self.name) test_proba_evaluation_frame =", "y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size) if transformer is not None: logger.time_log('Fitting Transformer...')", "sampling.fit_resample(x_train, y_train) logger.log('Balanced Training Shape is %s' % Counter(y_new)) if hasattr(x_train, 'columns'): x_new", "not None: evaluator.evaluate_fold_scores(fold_scores) evaluator.evaluate_classifier_result( self.estimator, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() if self.hyper_parameters", "Overall a hacky method to allow for incremental training. Really needs to be", "cleaner form. \"\"\" if hasattr(x_train, 'iloc'): x_fold_train, x_fold_test = x_train.iloc[train_index], x_train.iloc[test_index] else: x_fold_train,", "logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(results.best_estimator_, x_test) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p'", "Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p' % self.name) test_proba_evaluation_frame = None if record_predict_proba:", "batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer is not None: x_fold_train", "None if record_predict_proba: logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(results.best_estimator_, x_test) test_proba_evaluation_frame =", "searvh-based fitting. By default it will make use of the a BayesianSearchCV to", "to allow for incremental training. 
Really needs to be refactored into a cleaner", "x_fold_test = x_train[train_index], x_train[test_index] if hasattr(y_train, 'iloc'): y_fold_train, y_fold_test = y_train.iloc[train_index], y_train.iloc[test_index] else:", "= EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') if cv is not None:", "return Evaluator.evaluate_classifier_fold(fold_predict_frame, fold_predict_proba_frame) class Runner: \"\"\" The runner supports bare estimator fitting and", "logger.time_log('Shuffling Re-Sampled Data.\\n') x_train, y_train = shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') if self.hyper_parameters", "fit_increment is not None: if max_iters is not None: for iter in range(max_iters):", "random_state=random_state) x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size) if sampling is not", "Holdout Partition...') y_test_predict = batch_predict(results.best_estimator_, x_test) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p'", "Model...') fold_scores = Parallel(n_jobs=n_jobs, verbose=3)( delayed(crossfold_classifier)( clone(self.estimator), transformer, x_train, y_train, train_index, test_index, record_predict_proba,", "hacky method to allow for incremental training. Really needs to be refactored into", "Evaluator, Logger, use_project_path, batch_fit_classifier def crossfold_classifier(estimator, transformer, x_train, y_train, train_index, test_index, record_predict_proba, verbose,", "Training Shape is %s' % Counter(y_new)) if hasattr(x_train, 'columns'): x_new = pd.DataFrame(x_new, columns=x_train.columns)", "hyper_parameters=None): self.name = name self.df = df self.target = target self.estimator = estimator", "x_train, y_train, train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state): \"\"\" This method", "test_proba_evaluation_frame = None if record_predict_proba: logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(results.best_estimator_, x_test)", "pd.DataFrame(x_new, columns=x_train.columns) x_train, y_train = x_new, y_new logger.time_log('Re-Sampling Complete.\\n') logger.time_log('Shuffling Re-Sampled Data.\\n') x_train,", "\"<EMAIL>\" __copyright__ = \"Copyright 2019, <NAME>\" __license__ = \"Creative Commons Attribution-ShareAlike 4.0 International", "train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state ) for train_index, test_index in", "= shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') if self.hyper_parameters is not None: self.estimator.set_params(**self.hyper_parameters.params) if", "__copyright__ = \"Copyright 2019, <NAME>\" __license__ = \"Creative Commons Attribution-ShareAlike 4.0 International License\"", "from sklearn.model_selection import StratifiedKFold, train_test_split from sklearn.utils import shuffle from utility import batch_predict,", "Ensures everything is cleanly logged, evaluated, and pickled. 
\"\"\" def __init__( self, name,", "\"\"\" def __init__( self, name, df, target, estimator, hyper_parameters=None): self.name = name self.df", "transformer is not None: x_fold_train = transformer.transform(x_fold_train) estimator.fit(x_fold_train, y_fold_train) y_fold_test_predict = batch_predict(estimator, x_fold_test,", "= \"<EMAIL>\" __copyright__ = \"Copyright 2019, <NAME>\" __license__ = \"Creative Commons Attribution-ShareAlike 4.0", "(probability)...') y_test_predict_proba = batch_predict_proba(self.estimator, x_test, transformer=transformer, verbose=verbose) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' %", "sampling is not None: logger.time_log('Starting Data Re-Sampling...') logger.log('Original Training Shape is %s' %", "for training to be done using the joblib parallelism in scikit learn. Overall", "HyperParameter Search...') results = search.fit(x_train, y_train) logger.time_log('Search Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict =", "parallelism in scikit learn. Overall a hacky method to allow for incremental training.", "y_fold_train) y_fold_test_predict = batch_predict(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_frame = EvaluationFrame(y_fold_test, y_fold_test_predict) fold_predict_proba_frame =", "StratifiedKFold(n_splits=cv, random_state=random_state) logger.time_log('Cross Validating Model...') fold_scores = Parallel(n_jobs=n_jobs, verbose=3)( delayed(crossfold_classifier)( clone(self.estimator), transformer, x_train,", "batch_fit_classifier def crossfold_classifier(estimator, transformer, x_train, y_train, train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters,", "is not None: evaluator.evaluate_fold_scores(fold_scores) evaluator.evaluate_classifier_result( self.estimator, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() if", "Complete.\\n') logger.time_log('Starting HyperParameter Search...') results = search.fit(x_train, y_train) logger.time_log('Search Complete.\\n') logger.time_log('Testing Training Partition...')", "Fit Complete.\\n') if sampling is not None: logger.time_log('Starting Data Re-Sampling...') logger.log('Original Training Shape", "None: for iter in range(max_iters): x_iter_train, y_iter_train = shuffle(x_train, y_train, random_state=random_state) batch_fit_classifier(self.estimator, x_iter_train,", "y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size) if sampling is not None: logger.time_log('Starting Data", "x_train, y_train, train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state ) for train_index,", "batch_predict_proba(self.estimator, x_test, transformer=transformer, verbose=verbose) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n')", "logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(self.estimator, x_train, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') train_evaluation_frame =", "None: if max_iters is not None: for iter in range(max_iters): x_iter_train, y_iter_train =", "Counter(y_new)) if hasattr(x_train, 'columns'): x_new = pd.DataFrame(x_new, columns=x_train.columns) x_train, y_train = 
x_new, y_new", "= batch_predict_proba(self.estimator, x_test, transformer=transformer, verbose=verbose) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing", "and pickled. \"\"\" def __init__( self, name, df, target, estimator, hyper_parameters=None): self.name =", "y_train) ) logger.time_log('Cross Validation Complete.\\n') logger.time_log('Training Model...') if fit_increment is not None: if", "logger.time_log('Testing Complete.\\n') if cv is not None: evaluator.evaluate_fold_scores(fold_scores) evaluator.evaluate_classifier_result( self.estimator, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame,", "logger.time_log('Search Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(results.best_estimator_, x_train) logger.time_log('Testing Complete.\\n') train_evaluation_frame =", "shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') if self.hyper_parameters is not None: self.estimator.set_params(**self.hyper_parameters.params) if cv", "self.df if sample is not None: data_frame = data_frame.sample(n=sample, random_state=random_state) x_train, x_test, y_train,", "x_train.iloc[train_index], x_train.iloc[test_index] else: x_fold_train, x_fold_test = x_train[train_index], x_train[test_index] if hasattr(y_train, 'iloc'): y_fold_train, y_fold_test", "Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(self.estimator, x_test, transformer=transformer,", "use_project_path, batch_fit_classifier def crossfold_classifier(estimator, transformer, x_train, y_train, train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start,", "def run_classification_experiment( self, sample=None, random_state=None, test_size=0.20, multiclass=False, record_predict_proba=False, sampling=None, cv=5, verbose=True, transformer=None, fit_increment=None,", "fold_predict_proba_frame = EvaluationFrame(y_fold_test, y_fold_test_predict_proba) return Evaluator.evaluate_classifier_fold(fold_predict_frame, fold_predict_proba_frame) class Runner: \"\"\" The runner supports", "= transformer.transform(x_train) self.estimator.fit(x_train_transformed, y_train) else: self.estimator.fit(x_train, y_train) logger.time_log('Training Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict", "BayesSearchCV( self.estimator, self.hyper_parameters.search_space, n_jobs=n_jobs, n_iter=n_iter, cv=cv, verbose=verbose, scoring=scoring, return_train_score=True ) data_frame = self.df", "run_classification_experiment( self, sample=None, random_state=None, test_size=0.20, multiclass=False, record_predict_proba=False, sampling=None, cv=5, verbose=True, transformer=None, fit_increment=None, warm_start=False,", "y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(self.estimator, x_test, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') test_evaluation_frame", "logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(results.best_estimator_, x_test)", "\"\"\" This method allows for training to be done using the joblib parallelism", "transformer is not None: 
x_train_transformed = transformer.transform(x_train) self.estimator.fit(x_train_transformed, y_train) else: self.estimator.fit(x_train, y_train) logger.time_log('Training", "= Evaluator(logger) data_frame = self.df if sample is not None: data_frame = data_frame.sample(n=sample,", "import BayesSearchCV from sklearn.base import clone from sklearn.externals.joblib import Parallel, delayed from sklearn.model_selection", "transformer.transform(x_fold_train) estimator.fit(x_fold_train, y_fold_train) y_fold_test_predict = batch_predict(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_frame = EvaluationFrame(y_fold_test, y_fold_test_predict)", "increment=fit_increment, verbose=verbose) else: batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer is", "None: kfold = StratifiedKFold(n_splits=cv, random_state=random_state) logger.time_log('Cross Validating Model...') fold_scores = Parallel(n_jobs=n_jobs, verbose=3)( delayed(crossfold_classifier)(", "A common training and evaluation runner to allow for easy and consistent model", "supports bare estimator fitting and searvh-based fitting. By default it will make use", "hasattr(x_train, 'iloc'): x_fold_train, x_fold_test = x_train.iloc[train_index], x_train.iloc[test_index] else: x_fold_train, x_fold_test = x_train[train_index], x_train[test_index]", "random_state=random_state) x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size) if transformer is not", "max_iters=None, n_jobs=-1): use_project_path() logger = Logger('%s.txt' % self.name) evaluator = Evaluator(logger) data_frame =", "results, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() self.hyper_parameters.params = results.best_params_ self.hyper_parameters.save('%s_params.p' % self.name)", "= df self.target = target self.estimator = estimator self.hyper_parameters = hyper_parameters self.trained_estimator =", "batch_predict_proba(results.best_estimator_, x_test) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') evaluator =", "= pd.DataFrame(x_new, columns=x_train.columns) x_train, y_train = x_new, y_new logger.time_log('Re-Sampling Complete.\\n') logger.time_log('Shuffling Re-Sampled Data.\\n')", "Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(results.best_estimator_, x_train) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train,", "= Logger('%s.txt' % self.name) evaluator = Evaluator(logger) data_frame = self.df if sample is", "logged, evaluated, and pickled. 
\"\"\" def __init__( self, name, df, target, estimator, hyper_parameters=None):", "train_test_split(data_frame, data_frame[self.target], test_size=test_size) if sampling is not None: logger.time_log('Starting Data Re-Sampling...') logger.log('Original Training", "not None: logger.time_log('Fitting Transformer...') transformer.fit(x_train) logger.time_log('Transformer Fit Complete.\\n') if sampling is not None:", "= hyper_parameters self.trained_estimator = None def run_classification_experiment( self, sample=None, random_state=None, test_size=0.20, multiclass=False, record_predict_proba=False,", "Evaluator(logger) data_frame = self.df if sample is not None: data_frame = data_frame.sample(n=sample, random_state=random_state)", "x_train[test_index] if hasattr(y_train, 'iloc'): y_fold_train, y_fold_test = y_train.iloc[train_index], y_train.iloc[test_index] else: y_fold_train, y_fold_test =", "self.hyper_parameters is not None: self.hyper_parameters.save('%s_params.p' % self.name) self.trained_estimator = self.estimator def run_classification_search_experiment( self,", "test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() if self.hyper_parameters is not None: self.hyper_parameters.save('%s_params.p' %", "= None def run_classification_experiment( self, sample=None, random_state=None, test_size=0.20, multiclass=False, record_predict_proba=False, sampling=None, cv=5, verbose=True,", "transformer=transformer, verbose=False) fold_predict_proba_frame = EvaluationFrame(y_fold_test, y_fold_test_predict_proba) return Evaluator.evaluate_classifier_fold(fold_predict_frame, fold_predict_proba_frame) class Runner: \"\"\" The", "warm_start=False, max_iters=None, n_jobs=-1): use_project_path() logger = Logger('%s.txt' % self.name) evaluator = Evaluator(logger) data_frame", "transformer.transform(x_train) self.estimator.fit(x_train_transformed, y_train) else: self.estimator.fit(x_train, y_train) logger.time_log('Training Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict =", "self.hyper_parameters.save('%s_params.p' % self.name) self.trained_estimator = self.estimator def run_classification_search_experiment( self, scoring, sample=None, random_state=None, test_size=0.20,", "test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state): \"\"\" This method allows for training", "y_test_predict = batch_predict(results.best_estimator_, x_test) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p' % self.name)", "\"\"\" The runner supports bare estimator fitting and searvh-based fitting. 
By default it", "import pandas as pd from collections import Counter from skopt import BayesSearchCV from", "else: if transformer is not None: x_fold_train = transformer.transform(x_fold_train) estimator.fit(x_fold_train, y_fold_train) y_fold_test_predict =", "fold_scores = Parallel(n_jobs=n_jobs, verbose=3)( delayed(crossfold_classifier)( clone(self.estimator), transformer, x_train, y_train, train_index, test_index, record_predict_proba, verbose,", "as pd from collections import Counter from skopt import BayesSearchCV from sklearn.base import", "x_test, transformer=transformer, verbose=verbose) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') if", "'columns'): x_new = pd.DataFrame(x_new, columns=x_train.columns) x_train, y_train = x_new, y_new logger.time_log('Re-Sampling Complete.\\n') logger.time_log('Shuffling", "else: x_fold_train, x_fold_test = x_train[train_index], x_train[test_index] if hasattr(y_train, 'iloc'): y_fold_train, y_fold_test = y_train.iloc[train_index],", "n_iter=n_iter, cv=cv, verbose=verbose, scoring=scoring, return_train_score=True ) data_frame = self.df if sample is not", "shuffle(x_train, y_train, random_state=random_state) batch_fit_classifier(self.estimator, x_iter_train, y_iter_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(self.estimator, x_train, y_train,", "Holdout Partition...') y_test_predict = batch_predict(self.estimator, x_test, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test,", "if record_predict_proba: logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(self.estimator, x_test, transformer=transformer, verbose=verbose) test_proba_evaluation_frame", "random_state=random_state) logger.time_log('Shuffling Complete.\\n') logger.time_log('Starting HyperParameter Search...') results = search.fit(x_train, y_train) logger.time_log('Search Complete.\\n') logger.time_log('Testing", "= EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(results.best_estimator_, x_test) logger.time_log('Testing Complete.\\n') test_evaluation_frame", "logger.time_log('Training Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(self.estimator, x_train, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n')", "hasattr(y_train, 'iloc'): y_fold_train, y_fold_test = y_train.iloc[train_index], y_train.iloc[test_index] else: y_fold_train, y_fold_test = y_train[train_index], y_train[test_index]", "test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') evaluator = Evaluator(logger) evaluator.evaluate_classifier_result(", "evaluator.evaluate_classifier_result( self.estimator, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() if self.hyper_parameters is not None:", "if sampling is not None: logger.time_log('Starting Data Re-Sampling...') logger.log('Original Training Shape is %s'", "and searvh-based fitting. 
By default it will make use of the a BayesianSearchCV", "y_new logger.time_log('Re-Sampling Complete.\\n') logger.time_log('Shuffling Re-Sampled Data.\\n') x_train, y_train = shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling", "is not None: for iter in range(max_iters): x_fold_train, y_fold_train = shuffle(x_fold_train, y_fold_train, random_state=random_state)", "if hasattr(x_train, 'columns'): x_new = pd.DataFrame(x_new, columns=x_train.columns) x_train, y_train = x_new, y_new logger.time_log('Re-Sampling", "for iter in range(max_iters): x_iter_train, y_iter_train = shuffle(x_train, y_train, random_state=random_state) batch_fit_classifier(self.estimator, x_iter_train, y_iter_train,", "x_train, y_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer is not None: x_train_transformed =", "else: if transformer is not None: x_train_transformed = transformer.transform(x_train) self.estimator.fit(x_train_transformed, y_train) else: self.estimator.fit(x_train,", "logger.time_log('Starting Data Re-Sampling...') logger.log('Original Training Shape is %s' % Counter(y_train)) x_new, y_new =", "x_iter_train, y_iter_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(self.estimator, x_train, y_train, transformer=transformer, increment=fit_increment, verbose=verbose) else:", "kfold.split(x_train, y_train) ) logger.time_log('Cross Validation Complete.\\n') logger.time_log('Training Model...') if fit_increment is not None:", "test_size=0.20, n_jobs=-1, n_iter=2, cv=5, verbose=3, multiclass=False, record_predict_proba=False, sampling=None): use_project_path() logger = Logger('%s.txt' %", "be done using the joblib parallelism in scikit learn. Overall a hacky method", "iter in range(max_iters): x_fold_train, y_fold_train = shuffle(x_fold_train, y_fold_train, random_state=random_state) batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer,", "logger.close() if self.hyper_parameters is not None: self.hyper_parameters.save('%s_params.p' % self.name) self.trained_estimator = self.estimator def", "By default it will make use of the a BayesianSearchCV to perform hyperparameter", "evaluator = Evaluator(logger) evaluator.evaluate_classifier_result( results, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() self.hyper_parameters.params =", "Shape is %s' % Counter(y_train)) x_new, y_new = sampling.fit_resample(x_train, y_train) logger.log('Balanced Training Shape", "y_train = shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') if self.hyper_parameters is not None: self.estimator.set_params(**self.hyper_parameters.params)", "EvaluationFrame(y_fold_test, y_fold_test_predict_proba) return Evaluator.evaluate_classifier_fold(fold_predict_frame, fold_predict_proba_frame) class Runner: \"\"\" The runner supports bare estimator", "x_new = pd.DataFrame(x_new, columns=x_train.columns) x_train, y_train = x_new, y_new logger.time_log('Re-Sampling Complete.\\n') logger.time_log('Shuffling Re-Sampled", "not None: if max_iters is not None: for iter in range(max_iters): x_iter_train, y_iter_train", "x_fold_train = transformer.transform(x_fold_train) estimator.fit(x_fold_train, y_fold_train) y_fold_test_predict = batch_predict(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_frame =", "#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\" 
A common training and evaluation", "n_iter=2, cv=5, verbose=3, multiclass=False, record_predict_proba=False, sampling=None): use_project_path() logger = Logger('%s.txt' % self.name) search", "logger.time_log('Fitting Transformer...') transformer.fit(x_train) logger.time_log('Transformer Fit Complete.\\n') if sampling is not None: logger.time_log('Starting Data", "Partition (probability)...') y_test_predict_proba = batch_predict_proba(results.best_estimator_, x_test) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name)", "logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(results.best_estimator_, x_train) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict)", "model creation and evalutation \"\"\" __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" __copyright__ =", "self.hyper_parameters = hyper_parameters self.trained_estimator = None def run_classification_experiment( self, sample=None, random_state=None, test_size=0.20, multiclass=False,", "cv=5, verbose=True, transformer=None, fit_increment=None, warm_start=False, max_iters=None, n_jobs=-1): use_project_path() logger = Logger('%s.txt' % self.name)", "training to be done using the joblib parallelism in scikit learn. Overall a", "import clone from sklearn.externals.joblib import Parallel, delayed from sklearn.model_selection import StratifiedKFold, train_test_split from", "= data_frame.sample(n=sample, random_state=random_state) x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size) if transformer", "Partition...') y_train_predict = batch_predict(results.best_estimator_, x_train) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout", "import Counter from skopt import BayesSearchCV from sklearn.base import clone from sklearn.externals.joblib import", "y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size) if transformer is not None: logger.time_log('Fitting Transformer...') transformer.fit(x_train)", "random_state=random_state) batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment,", "self.hyper_parameters.search_space, n_jobs=n_jobs, n_iter=n_iter, cv=cv, verbose=verbose, scoring=scoring, return_train_score=True ) data_frame = self.df if sample", "test_index in kfold.split(x_train, y_train) ) logger.time_log('Cross Validation Complete.\\n') logger.time_log('Training Model...') if fit_increment is", "None def run_classification_experiment( self, sample=None, random_state=None, test_size=0.20, multiclass=False, record_predict_proba=False, sampling=None, cv=5, verbose=True, transformer=None,", "logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(self.estimator, x_test,", "__version__ = \"1.0\" import pandas as pd from collections import Counter from skopt", "None: if max_iters is not None: for iter in range(max_iters): x_fold_train, y_fold_train =", "y_iter_train = shuffle(x_train, y_train, random_state=random_state) batch_fit_classifier(self.estimator, x_iter_train, y_iter_train, 
transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(self.estimator,", "EvaluationFrame(y_fold_test, y_fold_test_predict) fold_predict_proba_frame = None if record_predict_proba: y_fold_test_predict_proba = batch_predict_proba(estimator, x_fold_test, transformer=transformer, verbose=False)", "common training and evaluation runner to allow for easy and consistent model creation", "logger.time_log('Shuffling Complete.\\n') logger.time_log('Starting HyperParameter Search...') results = search.fit(x_train, y_train) logger.time_log('Search Complete.\\n') logger.time_log('Testing Training", "Complete.\\n') if sampling is not None: logger.time_log('Starting Data Re-Sampling...') logger.log('Original Training Shape is", "utf-8 -*- \"\"\" A common training and evaluation runner to allow for easy", "x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size) if sampling is not None: logger.time_log('Starting", "x_test, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p' % self.name) test_proba_evaluation_frame", "'iloc'): x_fold_train, x_fold_test = x_train.iloc[train_index], x_train.iloc[test_index] else: x_fold_train, x_fold_test = x_train[train_index], x_train[test_index] if", "warm_start, max_iters, random_state ) for train_index, test_index in kfold.split(x_train, y_train) ) logger.time_log('Cross Validation", "= target self.estimator = estimator self.hyper_parameters = hyper_parameters self.trained_estimator = None def run_classification_experiment(", "cv is not None: kfold = StratifiedKFold(n_splits=cv, random_state=random_state) logger.time_log('Cross Validating Model...') fold_scores =", "Re-Sampled Data.\\n') x_train, y_train = shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') logger.time_log('Starting HyperParameter Search...')", "from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier def crossfold_classifier(estimator, transformer,", "x_fold_train, x_fold_test = x_train[train_index], x_train[test_index] if hasattr(y_train, 'iloc'): y_fold_train, y_fold_test = y_train.iloc[train_index], y_train.iloc[test_index]", "y_train, random_state=random_state) batch_fit_classifier(self.estimator, x_iter_train, y_iter_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(self.estimator, x_train, y_train, transformer=transformer,", "= BayesSearchCV( self.estimator, self.hyper_parameters.search_space, n_jobs=n_jobs, n_iter=n_iter, cv=cv, verbose=verbose, scoring=scoring, return_train_score=True ) data_frame =", "estimator fitting and searvh-based fitting. 
By default it will make use of the", "random_state): \"\"\" This method allows for training to be done using the joblib", "to allow for easy and consistent model creation and evalutation \"\"\" __author__ =", "Data.\\n') x_train, y_train = shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') logger.time_log('Starting HyperParameter Search...') results", "Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(results.best_estimator_, x_test) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' %", "pd from collections import Counter from skopt import BayesSearchCV from sklearn.base import clone", "logger.log('Original Training Shape is %s' % Counter(y_train)) x_new, y_new = sampling.fit_resample(x_train, y_train) logger.log('Balanced", "Search...') results = search.fit(x_train, y_train) logger.time_log('Search Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(results.best_estimator_,", "BayesSearchCV from sklearn.base import clone from sklearn.externals.joblib import Parallel, delayed from sklearn.model_selection import", "search = BayesSearchCV( self.estimator, self.hyper_parameters.search_space, n_jobs=n_jobs, n_iter=n_iter, cv=cv, verbose=verbose, scoring=scoring, return_train_score=True ) data_frame", "training and evaluation runner to allow for easy and consistent model creation and", "fit_increment, warm_start, max_iters, random_state): \"\"\" This method allows for training to be done", "is not None: for iter in range(max_iters): x_iter_train, y_iter_train = shuffle(x_train, y_train, random_state=random_state)", "logger.time_log('Cross Validating Model...') fold_scores = Parallel(n_jobs=n_jobs, verbose=3)( delayed(crossfold_classifier)( clone(self.estimator), transformer, x_train, y_train, train_index,", "self, name, df, target, estimator, hyper_parameters=None): self.name = name self.df = df self.target", ") data_frame = self.df if sample is not None: data_frame = data_frame.sample(n=sample, random_state=random_state)", "df self.target = target self.estimator = estimator self.hyper_parameters = hyper_parameters self.trained_estimator = None", "multiclass=False, record_predict_proba=False, sampling=None): use_project_path() logger = Logger('%s.txt' % self.name) search = BayesSearchCV( self.estimator,", "None: x_train_transformed = transformer.transform(x_train) self.estimator.fit(x_train_transformed, y_train) else: self.estimator.fit(x_train, y_train) logger.time_log('Training Complete.\\n') logger.time_log('Testing Training", "verbose=False) fold_predict_proba_frame = EvaluationFrame(y_fold_test, y_fold_test_predict_proba) return Evaluator.evaluate_classifier_fold(fold_predict_frame, fold_predict_proba_frame) class Runner: \"\"\" The runner", "self.name) evaluator = Evaluator(logger) data_frame = self.df if sample is not None: data_frame", "import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier def crossfold_classifier(estimator, transformer, x_train, y_train,", "fold_predict_frame = EvaluationFrame(y_fold_test, y_fold_test_predict) fold_predict_proba_frame = None if record_predict_proba: y_fold_test_predict_proba = batch_predict_proba(estimator, x_fold_test,", "= EvaluationFrame(y_fold_test, y_fold_test_predict_proba) return Evaluator.evaluate_classifier_fold(fold_predict_frame, fold_predict_proba_frame) class Runner: \"\"\" The 
runner supports bare", "= name self.df = df self.target = target self.estimator = estimator self.hyper_parameters =", "scoring, sample=None, random_state=None, test_size=0.20, n_jobs=-1, n_iter=2, cv=5, verbose=3, multiclass=False, record_predict_proba=False, sampling=None): use_project_path() logger", "Re-Sampled Data.\\n') x_train, y_train = shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') if self.hyper_parameters is", "perform hyperparameter tuning. Ensures everything is cleanly logged, evaluated, and pickled. \"\"\" def", "range(max_iters): x_iter_train, y_iter_train = shuffle(x_train, y_train, random_state=random_state) batch_fit_classifier(self.estimator, x_iter_train, y_iter_train, transformer=transformer, increment=fit_increment, verbose=verbose)", "test_proba_evaluation_frame = None if record_predict_proba: logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(self.estimator, x_test,", "in kfold.split(x_train, y_train) ) logger.time_log('Cross Validation Complete.\\n') logger.time_log('Training Model...') if fit_increment is not", "self.estimator.fit(x_train_transformed, y_train) else: self.estimator.fit(x_train, y_train) logger.time_log('Training Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(self.estimator,", "sklearn.externals.joblib import Parallel, delayed from sklearn.model_selection import StratifiedKFold, train_test_split from sklearn.utils import shuffle", "is not None: if max_iters is not None: for iter in range(max_iters): x_fold_train,", "if fit_increment is not None: if max_iters is not None: for iter in", "a hacky method to allow for incremental training. Really needs to be refactored", "verbose=verbose, scoring=scoring, return_train_score=True ) data_frame = self.df if sample is not None: data_frame", "data_frame = data_frame.sample(n=sample, random_state=random_state) x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size) if", "batch_predict(results.best_estimator_, x_train) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict =", "the joblib parallelism in scikit learn. Overall a hacky method to allow for", "needs to be refactored into a cleaner form. 
\"\"\" if hasattr(x_train, 'iloc'): x_fold_train,", "logger.time_log('Training Model...') if fit_increment is not None: if max_iters is not None: for", "from sklearn.base import clone from sklearn.externals.joblib import Parallel, delayed from sklearn.model_selection import StratifiedKFold,", "y_fold_train, y_fold_test = y_train.iloc[train_index], y_train.iloc[test_index] else: y_fold_train, y_fold_test = y_train[train_index], y_train[test_index] if fit_increment", "run_classification_search_experiment( self, scoring, sample=None, random_state=None, test_size=0.20, n_jobs=-1, n_iter=2, cv=5, verbose=3, multiclass=False, record_predict_proba=False, sampling=None):", "EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(results.best_estimator_, x_test) logger.time_log('Testing Complete.\\n') test_evaluation_frame =", "estimator self.hyper_parameters = hyper_parameters self.trained_estimator = None def run_classification_experiment( self, sample=None, random_state=None, test_size=0.20,", "use_project_path() logger = Logger('%s.txt' % self.name) search = BayesSearchCV( self.estimator, self.hyper_parameters.search_space, n_jobs=n_jobs, n_iter=n_iter,", "= x_train[train_index], x_train[test_index] if hasattr(y_train, 'iloc'): y_fold_train, y_fold_test = y_train.iloc[train_index], y_train.iloc[test_index] else: y_fold_train,", "not None: x_train_transformed = transformer.transform(x_train) self.estimator.fit(x_train_transformed, y_train) else: self.estimator.fit(x_train, y_train) logger.time_log('Training Complete.\\n') logger.time_log('Testing", "batch_predict(self.estimator, x_train, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...')", "if record_predict_proba: logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(results.best_estimator_, x_test) test_proba_evaluation_frame = EvaluationFrame(y_test,", "warm_start, max_iters, random_state): \"\"\" This method allows for training to be done using", "if hasattr(y_train, 'iloc'): y_fold_train, y_fold_test = y_train.iloc[train_index], y_train.iloc[test_index] else: y_fold_train, y_fold_test = y_train[train_index],", "not None: if max_iters is not None: for iter in range(max_iters): x_fold_train, y_fold_train", "= StratifiedKFold(n_splits=cv, random_state=random_state) logger.time_log('Cross Validating Model...') fold_scores = Parallel(n_jobs=n_jobs, verbose=3)( delayed(crossfold_classifier)( clone(self.estimator), transformer,", "consistent model creation and evalutation \"\"\" __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" __copyright__", "y_train) logger.time_log('Training Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(self.estimator, x_train, transformer=transformer, verbose=verbose) logger.time_log('Testing", "method allows for training to be done using the joblib parallelism in scikit", "cv=cv, verbose=verbose, scoring=scoring, return_train_score=True ) data_frame = self.df if sample is not None:", "random_state=None, test_size=0.20, multiclass=False, record_predict_proba=False, sampling=None, cv=5, verbose=True, transformer=None, fit_increment=None, warm_start=False, max_iters=None, n_jobs=-1): use_project_path()", "% self.name) logger.time_log('Testing Complete.\\n') evaluator = Evaluator(logger) evaluator.evaluate_classifier_result( 
results, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass", "y_train) logger.log('Balanced Training Shape is %s' % Counter(y_new)) if hasattr(x_train, 'columns'): x_new =", "None if record_predict_proba: y_fold_test_predict_proba = batch_predict_proba(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_proba_frame = EvaluationFrame(y_fold_test, y_fold_test_predict_proba)", "data_frame[self.target], test_size=test_size) if transformer is not None: logger.time_log('Fitting Transformer...') transformer.fit(x_train) logger.time_log('Transformer Fit Complete.\\n')", "is %s' % Counter(y_new)) if hasattr(x_train, 'columns'): x_new = pd.DataFrame(x_new, columns=x_train.columns) x_train, y_train", "EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier def crossfold_classifier(estimator, transformer, x_train, y_train, train_index, test_index, record_predict_proba,", "\"\"\" A common training and evaluation runner to allow for easy and consistent", "logger.time_log('Shuffling Re-Sampled Data.\\n') x_train, y_train = shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') logger.time_log('Starting HyperParameter", "import shuffle from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier def", "y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer is not None: x_fold_train = transformer.transform(x_fold_train)", "None: logger.time_log('Starting Data Re-Sampling...') logger.log('Original Training Shape is %s' % Counter(y_train)) x_new, y_new", "'iloc'): y_fold_train, y_fold_test = y_train.iloc[train_index], y_train.iloc[test_index] else: y_fold_train, y_fold_test = y_train[train_index], y_train[test_index] if", "sklearn.utils import shuffle from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier", "data_frame[self.target], test_size=test_size) if sampling is not None: logger.time_log('Starting Data Re-Sampling...') logger.log('Original Training Shape", "= batch_predict_proba(results.best_estimator_, x_test) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') evaluator", "= EvaluationFrame(y_fold_test, y_fold_test_predict) fold_predict_proba_frame = None if record_predict_proba: y_fold_test_predict_proba = batch_predict_proba(estimator, x_fold_test, transformer=transformer,", "evalutation \"\"\" __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" __copyright__ = \"Copyright 2019, <NAME>\"", "form. 
\"\"\" if hasattr(x_train, 'iloc'): x_fold_train, x_fold_test = x_train.iloc[train_index], x_train.iloc[test_index] else: x_fold_train, x_fold_test", "= estimator self.hyper_parameters = hyper_parameters self.trained_estimator = None def run_classification_experiment( self, sample=None, random_state=None,", "from collections import Counter from skopt import BayesSearchCV from sklearn.base import clone from", "if self.hyper_parameters is not None: self.estimator.set_params(**self.hyper_parameters.params) if cv is not None: kfold =", "transformer=transformer, verbose=verbose) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') if cv", "y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size) if sampling is not None: logger.time_log('Starting Data Re-Sampling...')", "x_train.iloc[test_index] else: x_fold_train, x_fold_test = x_train[train_index], x_train[test_index] if hasattr(y_train, 'iloc'): y_fold_train, y_fold_test =", "test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() if self.hyper_parameters is not None: self.hyper_parameters.save('%s_params.p' % self.name) self.trained_estimator", "= shuffle(x_train, y_train, random_state=random_state) batch_fit_classifier(self.estimator, x_iter_train, y_iter_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(self.estimator, x_train,", "incremental training. Really needs to be refactored into a cleaner form. \"\"\" if", "name self.df = df self.target = target self.estimator = estimator self.hyper_parameters = hyper_parameters", "% self.name) test_proba_evaluation_frame = None if record_predict_proba: logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba =", "test_size=0.20, multiclass=False, record_predict_proba=False, sampling=None, cv=5, verbose=True, transformer=None, fit_increment=None, warm_start=False, max_iters=None, n_jobs=-1): use_project_path() logger", "python # -*- coding: utf-8 -*- \"\"\" A common training and evaluation runner", "target, estimator, hyper_parameters=None): self.name = name self.df = df self.target = target self.estimator", "fitting. By default it will make use of the a BayesianSearchCV to perform", "License\" __version__ = \"1.0\" import pandas as pd from collections import Counter from", "kfold = StratifiedKFold(n_splits=cv, random_state=random_state) logger.time_log('Cross Validating Model...') fold_scores = Parallel(n_jobs=n_jobs, verbose=3)( delayed(crossfold_classifier)( clone(self.estimator),", "use of the a BayesianSearchCV to perform hyperparameter tuning. 
Ensures everything is cleanly", "self.estimator, self.hyper_parameters.search_space, n_jobs=n_jobs, n_iter=n_iter, cv=cv, verbose=verbose, scoring=scoring, return_train_score=True ) data_frame = self.df if", "None: self.estimator.set_params(**self.hyper_parameters.params) if cv is not None: kfold = StratifiedKFold(n_splits=cv, random_state=random_state) logger.time_log('Cross Validating", "y_test_predict_proba = batch_predict_proba(self.estimator, x_test, transformer=transformer, verbose=verbose) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name)", "delayed(crossfold_classifier)( clone(self.estimator), transformer, x_train, y_train, train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state", "= y_train[train_index], y_train[test_index] if fit_increment is not None: if max_iters is not None:", "Complete.\\n') evaluator = Evaluator(logger) evaluator.evaluate_classifier_result( results, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() self.hyper_parameters.params", "sklearn.model_selection import StratifiedKFold, train_test_split from sklearn.utils import shuffle from utility import batch_predict, batch_predict_proba,", "self.name) test_proba_evaluation_frame = None if record_predict_proba: logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(self.estimator,", "x_train, y_train = x_new, y_new logger.time_log('Re-Sampling Complete.\\n') logger.time_log('Shuffling Re-Sampled Data.\\n') x_train, y_train =", "= batch_predict(results.best_estimator_, x_test) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p' % self.name) test_proba_evaluation_frame", "verbose=True, transformer=None, fit_increment=None, warm_start=False, max_iters=None, n_jobs=-1): use_project_path() logger = Logger('%s.txt' % self.name) evaluator", "fitting and searvh-based fitting. By default it will make use of the a", "is not None: self.estimator.set_params(**self.hyper_parameters.params) if cv is not None: kfold = StratifiedKFold(n_splits=cv, random_state=random_state)", "y_train[test_index] if fit_increment is not None: if max_iters is not None: for iter", "to be done using the joblib parallelism in scikit learn. Overall a hacky", "is %s' % Counter(y_train)) x_new, y_new = sampling.fit_resample(x_train, y_train) logger.log('Balanced Training Shape is", "Logger('%s.txt' % self.name) search = BayesSearchCV( self.estimator, self.hyper_parameters.search_space, n_jobs=n_jobs, n_iter=n_iter, cv=cv, verbose=verbose, scoring=scoring,", "shuffle from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier def crossfold_classifier(estimator,", "not None: logger.time_log('Starting Data Re-Sampling...') logger.log('Original Training Shape is %s' % Counter(y_train)) x_new,", "joblib parallelism in scikit learn. 
Overall a hacky method to allow for incremental", "default it will make use of the a BayesianSearchCV to perform hyperparameter tuning.", "y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if", "\"Copyright 2019, <NAME>\" __license__ = \"Creative Commons Attribution-ShareAlike 4.0 International License\" __version__ =", "-*- coding: utf-8 -*- \"\"\" A common training and evaluation runner to allow", "transformer is not None: logger.time_log('Fitting Transformer...') transformer.fit(x_train) logger.time_log('Transformer Fit Complete.\\n') if sampling is", "record_predict_proba=False, sampling=None, cv=5, verbose=True, transformer=None, fit_increment=None, warm_start=False, max_iters=None, n_jobs=-1): use_project_path() logger = Logger('%s.txt'", "is not None: logger.time_log('Fitting Transformer...') transformer.fit(x_train) logger.time_log('Transformer Fit Complete.\\n') if sampling is not", "self.estimator def run_classification_search_experiment( self, scoring, sample=None, random_state=None, test_size=0.20, n_jobs=-1, n_iter=2, cv=5, verbose=3, multiclass=False,", "y_test_predict) test_evaluation_frame.save('%s_predict.p' % self.name) test_proba_evaluation_frame = None if record_predict_proba: logger.time_log('Testing Holdout Partition (probability)...')", "not None: for iter in range(max_iters): x_fold_train, y_fold_train = shuffle(x_fold_train, y_fold_train, random_state=random_state) batch_fit_classifier(estimator,", "= self.estimator def run_classification_search_experiment( self, scoring, sample=None, random_state=None, test_size=0.20, n_jobs=-1, n_iter=2, cv=5, verbose=3,", "Data.\\n') x_train, y_train = shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') if self.hyper_parameters is not", "fold_predict_proba_frame = None if record_predict_proba: y_fold_test_predict_proba = batch_predict_proba(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_proba_frame =", "max_iters, random_state ) for train_index, test_index in kfold.split(x_train, y_train) ) logger.time_log('Cross Validation Complete.\\n')", "range(max_iters): x_fold_train, y_fold_train = shuffle(x_fold_train, y_fold_train, random_state=random_state) batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose)", "verbose=False) fold_predict_frame = EvaluationFrame(y_fold_test, y_fold_test_predict) fold_predict_proba_frame = None if record_predict_proba: y_fold_test_predict_proba = batch_predict_proba(estimator,", "self.hyper_parameters is not None: self.estimator.set_params(**self.hyper_parameters.params) if cv is not None: kfold = StratifiedKFold(n_splits=cv,", "transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer is not None: x_fold_train = transformer.transform(x_fold_train) estimator.fit(x_fold_train,", "= train_test_split(data_frame, data_frame[self.target], test_size=test_size) if sampling is not None: logger.time_log('Starting Data Re-Sampling...') logger.log('Original", "y_iter_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(self.estimator, x_train, y_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if", "__author__ = \"<NAME>\" __email__ = \"<EMAIL>\" __copyright__ = \"Copyright 2019, <NAME>\" 
__license__ =", "y_fold_test_predict_proba = batch_predict_proba(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_proba_frame = EvaluationFrame(y_fold_test, y_fold_test_predict_proba) return Evaluator.evaluate_classifier_fold(fold_predict_frame, fold_predict_proba_frame)", "__email__ = \"<EMAIL>\" __copyright__ = \"Copyright 2019, <NAME>\" __license__ = \"Creative Commons Attribution-ShareAlike", "self.trained_estimator = self.estimator def run_classification_search_experiment( self, scoring, sample=None, random_state=None, test_size=0.20, n_jobs=-1, n_iter=2, cv=5,", "if transformer is not None: logger.time_log('Fitting Transformer...') transformer.fit(x_train) logger.time_log('Transformer Fit Complete.\\n') if sampling", "International License\" __version__ = \"1.0\" import pandas as pd from collections import Counter", "to be refactored into a cleaner form. \"\"\" if hasattr(x_train, 'iloc'): x_fold_train, x_fold_test", "Partition (probability)...') y_test_predict_proba = batch_predict_proba(self.estimator, x_test, transformer=transformer, verbose=verbose) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p'", "target self.estimator = estimator self.hyper_parameters = hyper_parameters self.trained_estimator = None def run_classification_experiment( self,", "%s' % Counter(y_train)) x_new, y_new = sampling.fit_resample(x_train, y_train) logger.log('Balanced Training Shape is %s'", "not None: self.estimator.set_params(**self.hyper_parameters.params) if cv is not None: kfold = StratifiedKFold(n_splits=cv, random_state=random_state) logger.time_log('Cross", "coding: utf-8 -*- \"\"\" A common training and evaluation runner to allow for", "bare estimator fitting and searvh-based fitting. By default it will make use of", "for easy and consistent model creation and evalutation \"\"\" __author__ = \"<NAME>\" __email__", "x_test) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p' % self.name) test_proba_evaluation_frame = None", "increment=fit_increment, verbose=verbose) else: if transformer is not None: x_train_transformed = transformer.transform(x_train) self.estimator.fit(x_train_transformed, y_train)", "fit_increment=None, warm_start=False, max_iters=None, n_jobs=-1): use_project_path() logger = Logger('%s.txt' % self.name) evaluator = Evaluator(logger)", "sampling=None, cv=5, verbose=True, transformer=None, fit_increment=None, warm_start=False, max_iters=None, n_jobs=-1): use_project_path() logger = Logger('%s.txt' %", ") for train_index, test_index in kfold.split(x_train, y_train) ) logger.time_log('Cross Validation Complete.\\n') logger.time_log('Training Model...')", "delayed from sklearn.model_selection import StratifiedKFold, train_test_split from sklearn.utils import shuffle from utility import", "pickled. \"\"\" def __init__( self, name, df, target, estimator, hyper_parameters=None): self.name = name", "make use of the a BayesianSearchCV to perform hyperparameter tuning. Ensures everything is", "data_frame.sample(n=sample, random_state=random_state) x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size) if transformer is", "x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size) if transformer is not None:", "the a BayesianSearchCV to perform hyperparameter tuning. 
Ensures everything is cleanly logged, evaluated,", "% self.name) evaluator = Evaluator(logger) data_frame = self.df if sample is not None:", "Training Shape is %s' % Counter(y_train)) x_new, y_new = sampling.fit_resample(x_train, y_train) logger.log('Balanced Training", "test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state ) for train_index, test_index in kfold.split(x_train,", "is not None: self.hyper_parameters.save('%s_params.p' % self.name) self.trained_estimator = self.estimator def run_classification_search_experiment( self, scoring,", "not None: x_fold_train = transformer.transform(x_fold_train) estimator.fit(x_fold_train, y_fold_train) y_fold_test_predict = batch_predict(estimator, x_fold_test, transformer=transformer, verbose=False)", "clone(self.estimator), transformer, x_train, y_train, train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state )", "logger.time_log('Shuffling Complete.\\n') if self.hyper_parameters is not None: self.estimator.set_params(**self.hyper_parameters.params) if cv is not None:", "Parallel(n_jobs=n_jobs, verbose=3)( delayed(crossfold_classifier)( clone(self.estimator), transformer, x_train, y_train, train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start,", "= y_train.iloc[train_index], y_train.iloc[test_index] else: y_fold_train, y_fold_test = y_train[train_index], y_train[test_index] if fit_increment is not", "iter in range(max_iters): x_iter_train, y_iter_train = shuffle(x_train, y_train, random_state=random_state) batch_fit_classifier(self.estimator, x_iter_train, y_iter_train, transformer=transformer,", "transformer, x_train, y_train, train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state): \"\"\" This", "increment=fit_increment, verbose=verbose) else: if transformer is not None: x_fold_train = transformer.transform(x_fold_train) estimator.fit(x_fold_train, y_fold_train)", "Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(results.best_estimator_, x_test) logger.time_log('Testing", "using the joblib parallelism in scikit learn. Overall a hacky method to allow", "x_fold_test = x_train.iloc[train_index], x_train.iloc[test_index] else: x_fold_train, x_fold_test = x_train[train_index], x_train[test_index] if hasattr(y_train, 'iloc'):", "self.trained_estimator = None def run_classification_experiment( self, sample=None, random_state=None, test_size=0.20, multiclass=False, record_predict_proba=False, sampling=None, cv=5,", "it will make use of the a BayesianSearchCV to perform hyperparameter tuning. 
Ensures", "is not None: kfold = StratifiedKFold(n_splits=cv, random_state=random_state) logger.time_log('Cross Validating Model...') fold_scores = Parallel(n_jobs=n_jobs,", "= batch_predict(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_frame = EvaluationFrame(y_fold_test, y_fold_test_predict) fold_predict_proba_frame = None if", "logger.time_log('Starting HyperParameter Search...') results = search.fit(x_train, y_train) logger.time_log('Search Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict", "from sklearn.utils import shuffle from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path,", "\"\"\" if hasattr(x_train, 'iloc'): x_fold_train, x_fold_test = x_train.iloc[train_index], x_train.iloc[test_index] else: x_fold_train, x_fold_test =", "name, df, target, estimator, hyper_parameters=None): self.name = name self.df = df self.target =", "Data Re-Sampling...') logger.log('Original Training Shape is %s' % Counter(y_train)) x_new, y_new = sampling.fit_resample(x_train,", "random_state=None, test_size=0.20, n_jobs=-1, n_iter=2, cv=5, verbose=3, multiclass=False, record_predict_proba=False, sampling=None): use_project_path() logger = Logger('%s.txt'", "logger.time_log('Cross Validation Complete.\\n') logger.time_log('Training Model...') if fit_increment is not None: if max_iters is", "x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer is not None: x_fold_train =", "Counter from skopt import BayesSearchCV from sklearn.base import clone from sklearn.externals.joblib import Parallel,", "from sklearn.externals.joblib import Parallel, delayed from sklearn.model_selection import StratifiedKFold, train_test_split from sklearn.utils import", "x_new, y_new = sampling.fit_resample(x_train, y_train) logger.log('Balanced Training Shape is %s' % Counter(y_new)) if", "y_train) logger.time_log('Search Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(results.best_estimator_, x_train) logger.time_log('Testing Complete.\\n') train_evaluation_frame", "test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') if cv is not None: evaluator.evaluate_fold_scores(fold_scores) evaluator.evaluate_classifier_result( self.estimator,", "Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(self.estimator, x_train, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') train_evaluation_frame", "if self.hyper_parameters is not None: self.hyper_parameters.save('%s_params.p' % self.name) self.trained_estimator = self.estimator def run_classification_search_experiment(", "= \"1.0\" import pandas as pd from collections import Counter from skopt import", "hasattr(x_train, 'columns'): x_new = pd.DataFrame(x_new, columns=x_train.columns) x_train, y_train = x_new, y_new logger.time_log('Re-Sampling Complete.\\n')", "if sample is not None: data_frame = data_frame.sample(n=sample, random_state=random_state) x_train, x_test, y_train, y_test", "Shape is %s' % Counter(y_new)) if hasattr(x_train, 'columns'): x_new = pd.DataFrame(x_new, columns=x_train.columns) x_train,", "use_project_path() logger = Logger('%s.txt' % self.name) evaluator = Evaluator(logger) data_frame = self.df if", "record_predict_proba: y_fold_test_predict_proba = batch_predict_proba(estimator, x_fold_test, transformer=transformer, 
verbose=False) fold_predict_proba_frame = EvaluationFrame(y_fold_test, y_fold_test_predict_proba) return Evaluator.evaluate_classifier_fold(fold_predict_frame,", "random_state=random_state) logger.time_log('Shuffling Complete.\\n') if self.hyper_parameters is not None: self.estimator.set_params(**self.hyper_parameters.params) if cv is not", "= Evaluator(logger) evaluator.evaluate_classifier_result( results, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() self.hyper_parameters.params = results.best_params_", "y_train = x_new, y_new logger.time_log('Re-Sampling Complete.\\n') logger.time_log('Shuffling Re-Sampled Data.\\n') x_train, y_train = shuffle(x_train,", "self.estimator.fit(x_train, y_train) logger.time_log('Training Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(self.estimator, x_train, transformer=transformer, verbose=verbose)", "logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p' % self.name) test_proba_evaluation_frame = None if", "test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() self.hyper_parameters.params = results.best_params_ self.hyper_parameters.save('%s_params.p' % self.name) self.trained_estimator = results.best_estimator_", "transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict =", "evaluator.evaluate_classifier_result( results, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() self.hyper_parameters.params = results.best_params_ self.hyper_parameters.save('%s_params.p' %", "and evaluation runner to allow for easy and consistent model creation and evalutation", "multiclass=False, record_predict_proba=False, sampling=None, cv=5, verbose=True, transformer=None, fit_increment=None, warm_start=False, max_iters=None, n_jobs=-1): use_project_path() logger =", "and evalutation \"\"\" __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" __copyright__ = \"Copyright 2019,", "cv is not None: evaluator.evaluate_fold_scores(fold_scores) evaluator.evaluate_classifier_result( self.estimator, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close()", "y_train.iloc[train_index], y_train.iloc[test_index] else: y_fold_train, y_fold_test = y_train[train_index], y_train[test_index] if fit_increment is not None:", "fold_predict_proba_frame) class Runner: \"\"\" The runner supports bare estimator fitting and searvh-based fitting.", "evaluation runner to allow for easy and consistent model creation and evalutation \"\"\"", "EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p' % self.name) test_proba_evaluation_frame = None if record_predict_proba: logger.time_log('Testing Holdout Partition", "is not None: data_frame = data_frame.sample(n=sample, random_state=random_state) x_train, x_test, y_train, y_test = train_test_split(data_frame,", "transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer", "= None if record_predict_proba: 
y_fold_test_predict_proba = batch_predict_proba(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_proba_frame = EvaluationFrame(y_fold_test,", "Evaluator(logger) evaluator.evaluate_classifier_result( results, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() self.hyper_parameters.params = results.best_params_ self.hyper_parameters.save('%s_params.p'", "y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') if self.hyper_parameters is not None: self.estimator.set_params(**self.hyper_parameters.params) if cv is", "None: for iter in range(max_iters): x_fold_train, y_fold_train = shuffle(x_fold_train, y_fold_train, random_state=random_state) batch_fit_classifier(estimator, x_fold_train,", "training. Really needs to be refactored into a cleaner form. \"\"\" if hasattr(x_train,", "y_test_predict = batch_predict(self.estimator, x_test, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p'", "max_iters is not None: for iter in range(max_iters): x_iter_train, y_iter_train = shuffle(x_train, y_train,", "verbose=verbose) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p' % self.name) test_proba_evaluation_frame = None", "y_fold_test = y_train.iloc[train_index], y_train.iloc[test_index] else: y_fold_train, y_fold_test = y_train[train_index], y_train[test_index] if fit_increment is", "= Logger('%s.txt' % self.name) search = BayesSearchCV( self.estimator, self.hyper_parameters.search_space, n_jobs=n_jobs, n_iter=n_iter, cv=cv, verbose=verbose,", "(probability)...') y_test_predict_proba = batch_predict_proba(results.best_estimator_, x_test) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing", "self.name = name self.df = df self.target = target self.estimator = estimator self.hyper_parameters", "= sampling.fit_resample(x_train, y_train) logger.log('Balanced Training Shape is %s' % Counter(y_new)) if hasattr(x_train, 'columns'):", "allow for easy and consistent model creation and evalutation \"\"\" __author__ = \"<NAME>\"", "utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier def crossfold_classifier(estimator, transformer, x_train,", "for incremental training. Really needs to be refactored into a cleaner form. 
\"\"\"", "else: batch_fit_classifier(self.estimator, x_train, y_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer is not None:", "Complete.\\n') logger.time_log('Shuffling Re-Sampled Data.\\n') x_train, y_train = shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') if", "\"Creative Commons Attribution-ShareAlike 4.0 International License\" __version__ = \"1.0\" import pandas as pd", "x_fold_test, transformer=transformer, verbose=False) fold_predict_frame = EvaluationFrame(y_fold_test, y_fold_test_predict) fold_predict_proba_frame = None if record_predict_proba: y_fold_test_predict_proba", "self.estimator.set_params(**self.hyper_parameters.params) if cv is not None: kfold = StratifiedKFold(n_splits=cv, random_state=random_state) logger.time_log('Cross Validating Model...')", "logger.log('Balanced Training Shape is %s' % Counter(y_new)) if hasattr(x_train, 'columns'): x_new = pd.DataFrame(x_new,", "verbose=verbose) else: batch_fit_classifier(self.estimator, x_train, y_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer is not", "not None: kfold = StratifiedKFold(n_splits=cv, random_state=random_state) logger.time_log('Cross Validating Model...') fold_scores = Parallel(n_jobs=n_jobs, verbose=3)(", "logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(results.best_estimator_, x_test) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict)", "train_test_split from sklearn.utils import shuffle from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger,", "and consistent model creation and evalutation \"\"\" __author__ = \"<NAME>\" __email__ = \"<EMAIL>\"", "is not None: if max_iters is not None: for iter in range(max_iters): x_iter_train,", "self.name) test_proba_evaluation_frame = None if record_predict_proba: logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(results.best_estimator_,", "record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state): \"\"\" This method allows for training to", "Runner: \"\"\" The runner supports bare estimator fitting and searvh-based fitting. By default", "train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() if self.hyper_parameters is not None: self.hyper_parameters.save('%s_params.p' % self.name)", "transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer is not None: x_train_transformed = transformer.transform(x_train) self.estimator.fit(x_train_transformed,", "train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() self.hyper_parameters.params = results.best_params_ self.hyper_parameters.save('%s_params.p' % self.name) self.trained_estimator =", "Counter(y_train)) x_new, y_new = sampling.fit_resample(x_train, y_train) logger.log('Balanced Training Shape is %s' % Counter(y_new))", "x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size) if sampling is not None:", "into a cleaner form. 
\"\"\" if hasattr(x_train, 'iloc'): x_fold_train, x_fold_test = x_train.iloc[train_index], x_train.iloc[test_index]", "shuffle(x_fold_train, y_fold_train, random_state=random_state) batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(estimator, x_fold_train, y_fold_train,", "2019, <NAME>\" __license__ = \"Creative Commons Attribution-ShareAlike 4.0 International License\" __version__ = \"1.0\"", "batch_predict_proba(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_proba_frame = EvaluationFrame(y_fold_test, y_fold_test_predict_proba) return Evaluator.evaluate_classifier_fold(fold_predict_frame, fold_predict_proba_frame) class Runner:", "logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(self.estimator, x_test, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') test_evaluation_frame =", "if hasattr(x_train, 'iloc'): x_fold_train, x_fold_test = x_train.iloc[train_index], x_train.iloc[test_index] else: x_fold_train, x_fold_test = x_train[train_index],", "y_fold_test = y_train[train_index], y_train[test_index] if fit_increment is not None: if max_iters is not", "def run_classification_search_experiment( self, scoring, sample=None, random_state=None, test_size=0.20, n_jobs=-1, n_iter=2, cv=5, verbose=3, multiclass=False, record_predict_proba=False,", "= batch_predict(self.estimator, x_train, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout", "cleanly logged, evaluated, and pickled. \"\"\" def __init__( self, name, df, target, estimator,", "__init__( self, name, df, target, estimator, hyper_parameters=None): self.name = name self.df = df", "verbose=3, multiclass=False, record_predict_proba=False, sampling=None): use_project_path() logger = Logger('%s.txt' % self.name) search = BayesSearchCV(", "data_frame.sample(n=sample, random_state=random_state) x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size) if sampling is", "verbose=3)( delayed(crossfold_classifier)( clone(self.estimator), transformer, x_train, y_train, train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters,", "StratifiedKFold, train_test_split from sklearn.utils import shuffle from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator,", "= batch_predict_proba(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_proba_frame = EvaluationFrame(y_fold_test, y_fold_test_predict_proba) return Evaluator.evaluate_classifier_fold(fold_predict_frame, fold_predict_proba_frame) class", "% self.name) self.trained_estimator = self.estimator def run_classification_search_experiment( self, scoring, sample=None, random_state=None, test_size=0.20, n_jobs=-1,", "creation and evalutation \"\"\" __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" __copyright__ = \"Copyright", "sample is not None: data_frame = data_frame.sample(n=sample, random_state=random_state) x_train, x_test, y_train, y_test =", "sample=None, random_state=None, test_size=0.20, multiclass=False, record_predict_proba=False, sampling=None, cv=5, verbose=True, transformer=None, fit_increment=None, warm_start=False, max_iters=None, n_jobs=-1):", "y_train) else: self.estimator.fit(x_train, y_train) logger.time_log('Training Complete.\\n') 
logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(self.estimator, x_train,", "EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') evaluator = Evaluator(logger) evaluator.evaluate_classifier_result( results, test_evaluation_frame,", "test_evaluation_frame = EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p' % self.name) test_proba_evaluation_frame = None if record_predict_proba: logger.time_log('Testing", "y_fold_test_predict = batch_predict(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_frame = EvaluationFrame(y_fold_test, y_fold_test_predict) fold_predict_proba_frame = None", "verbose, fit_increment, warm_start, max_iters, random_state ) for train_index, test_index in kfold.split(x_train, y_train) )", "Commons Attribution-ShareAlike 4.0 International License\" __version__ = \"1.0\" import pandas as pd from", "verbose=verbose) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') if cv is", "batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose)", "train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state): \"\"\" This method allows for", "= transformer.transform(x_fold_train) estimator.fit(x_fold_train, y_fold_train) y_fold_test_predict = batch_predict(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_frame = EvaluationFrame(y_fold_test,", "record_predict_proba: logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(self.estimator, x_test, transformer=transformer, verbose=verbose) test_proba_evaluation_frame =", "else: batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer is not None:", "learn. Overall a hacky method to allow for incremental training. Really needs to", "test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') evaluator = Evaluator(logger) evaluator.evaluate_classifier_result( results, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame,", "Training Partition...') y_train_predict = batch_predict(self.estimator, x_train, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train,", "method to allow for incremental training. Really needs to be refactored into a", "Really needs to be refactored into a cleaner form. 
\"\"\" if hasattr(x_train, 'iloc'):", "= \"Creative Commons Attribution-ShareAlike 4.0 International License\" __version__ = \"1.0\" import pandas as", "return_train_score=True ) data_frame = self.df if sample is not None: data_frame = data_frame.sample(n=sample,", "= EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p' % self.name) test_proba_evaluation_frame = None if record_predict_proba: logger.time_log('Testing Holdout", "Attribution-ShareAlike 4.0 International License\" __version__ = \"1.0\" import pandas as pd from collections", "sampling=None): use_project_path() logger = Logger('%s.txt' % self.name) search = BayesSearchCV( self.estimator, self.hyper_parameters.search_space, n_jobs=n_jobs,", "x_fold_test, transformer=transformer, verbose=False) fold_predict_proba_frame = EvaluationFrame(y_fold_test, y_fold_test_predict_proba) return Evaluator.evaluate_classifier_fold(fold_predict_frame, fold_predict_proba_frame) class Runner: \"\"\"", "= search.fit(x_train, y_train) logger.time_log('Search Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(results.best_estimator_, x_train) logger.time_log('Testing", "% self.name) search = BayesSearchCV( self.estimator, self.hyper_parameters.search_space, n_jobs=n_jobs, n_iter=n_iter, cv=cv, verbose=verbose, scoring=scoring, return_train_score=True", "if cv is not None: kfold = StratifiedKFold(n_splits=cv, random_state=random_state) logger.time_log('Cross Validating Model...') fold_scores", "None: x_fold_train = transformer.transform(x_fold_train) estimator.fit(x_fold_train, y_fold_train) y_fold_test_predict = batch_predict(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_frame", "import StratifiedKFold, train_test_split from sklearn.utils import shuffle from utility import batch_predict, batch_predict_proba, EvaluationFrame,", "transformer=None, fit_increment=None, warm_start=False, max_iters=None, n_jobs=-1): use_project_path() logger = Logger('%s.txt' % self.name) evaluator =", "= None if record_predict_proba: logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(results.best_estimator_, x_test) test_proba_evaluation_frame", "estimator.fit(x_fold_train, y_fold_train) y_fold_test_predict = batch_predict(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_frame = EvaluationFrame(y_fold_test, y_fold_test_predict) fold_predict_proba_frame", "batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier def crossfold_classifier(estimator, transformer, x_train, y_train, train_index,", "transformer.fit(x_train) logger.time_log('Transformer Fit Complete.\\n') if sampling is not None: logger.time_log('Starting Data Re-Sampling...') logger.log('Original", "evaluator = Evaluator(logger) data_frame = self.df if sample is not None: data_frame =", "done using the joblib parallelism in scikit learn. 
Overall a hacky method to", "= self.df if sample is not None: data_frame = data_frame.sample(n=sample, random_state=random_state) x_train, x_test,", "= EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(self.estimator, x_test, transformer=transformer, verbose=verbose) logger.time_log('Testing", "self.name) logger.time_log('Testing Complete.\\n') evaluator = Evaluator(logger) evaluator.evaluate_classifier_result( results, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass )", "def __init__( self, name, df, target, estimator, hyper_parameters=None): self.name = name self.df =", "max_iters, random_state): \"\"\" This method allows for training to be done using the", "None if record_predict_proba: logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(self.estimator, x_test, transformer=transformer, verbose=verbose)", "from skopt import BayesSearchCV from sklearn.base import clone from sklearn.externals.joblib import Parallel, delayed", "random_state ) for train_index, test_index in kfold.split(x_train, y_train) ) logger.time_log('Cross Validation Complete.\\n') logger.time_log('Training", "Partition...') y_test_predict = batch_predict(self.estimator, x_test, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict)", "cv=5, verbose=3, multiclass=False, record_predict_proba=False, sampling=None): use_project_path() logger = Logger('%s.txt' % self.name) search =", "Logger, use_project_path, batch_fit_classifier def crossfold_classifier(estimator, transformer, x_train, y_train, train_index, test_index, record_predict_proba, verbose, fit_increment,", "self, scoring, sample=None, random_state=None, test_size=0.20, n_jobs=-1, n_iter=2, cv=5, verbose=3, multiclass=False, record_predict_proba=False, sampling=None): use_project_path()", "test_size=test_size) if sampling is not None: logger.time_log('Starting Data Re-Sampling...') logger.log('Original Training Shape is", "if record_predict_proba: y_fold_test_predict_proba = batch_predict_proba(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_proba_frame = EvaluationFrame(y_fold_test, y_fold_test_predict_proba) return", "logger.time_log('Testing Complete.\\n') evaluator = Evaluator(logger) evaluator.evaluate_classifier_result( results, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close()", "increment=fit_increment, verbose=verbose) else: batch_fit_classifier(self.estimator, x_train, y_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: if transformer is", "EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') if cv is not None: evaluator.evaluate_fold_scores(fold_scores)", "y_train = shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') logger.time_log('Starting HyperParameter Search...') results = search.fit(x_train,", "data_frame = self.df if sample is not None: data_frame = data_frame.sample(n=sample, random_state=random_state) x_train,", "Validating Model...') fold_scores = Parallel(n_jobs=n_jobs, verbose=3)( delayed(crossfold_classifier)( clone(self.estimator), transformer, x_train, y_train, train_index, 
test_index,", "% Counter(y_train)) x_new, y_new = sampling.fit_resample(x_train, y_train) logger.log('Balanced Training Shape is %s' %", "if transformer is not None: x_train_transformed = transformer.transform(x_train) self.estimator.fit(x_train_transformed, y_train) else: self.estimator.fit(x_train, y_train)", "is not None: x_train_transformed = transformer.transform(x_train) self.estimator.fit(x_train_transformed, y_train) else: self.estimator.fit(x_train, y_train) logger.time_log('Training Complete.\\n')", "self.estimator, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() if self.hyper_parameters is not None: self.hyper_parameters.save('%s_params.p'", "df, target, estimator, hyper_parameters=None): self.name = name self.df = df self.target = target", "Transformer...') transformer.fit(x_train) logger.time_log('Transformer Fit Complete.\\n') if sampling is not None: logger.time_log('Starting Data Re-Sampling...')", "% Counter(y_new)) if hasattr(x_train, 'columns'): x_new = pd.DataFrame(x_new, columns=x_train.columns) x_train, y_train = x_new,", "# -*- coding: utf-8 -*- \"\"\" A common training and evaluation runner to", "hyperparameter tuning. Ensures everything is cleanly logged, evaluated, and pickled. \"\"\" def __init__(", "estimator, hyper_parameters=None): self.name = name self.df = df self.target = target self.estimator =", "batch_predict(estimator, x_fold_test, transformer=transformer, verbose=False) fold_predict_frame = EvaluationFrame(y_fold_test, y_fold_test_predict) fold_predict_proba_frame = None if record_predict_proba:", "x_test) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') evaluator = Evaluator(logger)", "Evaluator.evaluate_classifier_fold(fold_predict_frame, fold_predict_proba_frame) class Runner: \"\"\" The runner supports bare estimator fitting and searvh-based", "verbose=verbose) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(self.estimator,", "None: evaluator.evaluate_fold_scores(fold_scores) evaluator.evaluate_classifier_result( self.estimator, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() if self.hyper_parameters is", "record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state ) for train_index, test_index in kfold.split(x_train, y_train)", "y_fold_train = shuffle(x_fold_train, y_fold_train, random_state=random_state) batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(estimator,", "in scikit learn. Overall a hacky method to allow for incremental training. Really", "allows for training to be done using the joblib parallelism in scikit learn.", "batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier def crossfold_classifier(estimator, transformer, x_train, y_train, train_index, test_index,", "to perform hyperparameter tuning. Ensures everything is cleanly logged, evaluated, and pickled. 
\"\"\"", "\"\"\" __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" __copyright__ = \"Copyright 2019, <NAME>\" __license__", "y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n') evaluator = Evaluator(logger) evaluator.evaluate_classifier_result( results, test_evaluation_frame, train=train_evaluation_frame,", "\"1.0\" import pandas as pd from collections import Counter from skopt import BayesSearchCV", "y_test_predict_proba = batch_predict_proba(results.best_estimator_, x_test) test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba) test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name) logger.time_log('Testing Complete.\\n')", "Model...') if fit_increment is not None: if max_iters is not None: for iter", "be refactored into a cleaner form. \"\"\" if hasattr(x_train, 'iloc'): x_fold_train, x_fold_test =", "EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(self.estimator, x_test, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n')", "refactored into a cleaner form. \"\"\" if hasattr(x_train, 'iloc'): x_fold_train, x_fold_test = x_train.iloc[train_index],", "evaluated, and pickled. \"\"\" def __init__( self, name, df, target, estimator, hyper_parameters=None): self.name", "not None: self.hyper_parameters.save('%s_params.p' % self.name) self.trained_estimator = self.estimator def run_classification_search_experiment( self, scoring, sample=None,", "hyper_parameters self.trained_estimator = None def run_classification_experiment( self, sample=None, random_state=None, test_size=0.20, multiclass=False, record_predict_proba=False, sampling=None,", "def crossfold_classifier(estimator, transformer, x_train, y_train, train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state):", "logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(self.estimator, x_test, transformer=transformer, verbose=verbose) test_proba_evaluation_frame = EvaluationFrame(y_test,", "n_jobs=n_jobs, n_iter=n_iter, cv=cv, verbose=verbose, scoring=scoring, return_train_score=True ) data_frame = self.df if sample is", "runner supports bare estimator fitting and searvh-based fitting. By default it will make", "random_state=random_state) batch_fit_classifier(self.estimator, x_iter_train, y_iter_train, transformer=transformer, increment=fit_increment, verbose=verbose) else: batch_fit_classifier(self.estimator, x_train, y_train, transformer=transformer, increment=fit_increment,", "if cv is not None: evaluator.evaluate_fold_scores(fold_scores) evaluator.evaluate_classifier_result( self.estimator, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass )", "test_size=test_size) if transformer is not None: logger.time_log('Fitting Transformer...') transformer.fit(x_train) logger.time_log('Transformer Fit Complete.\\n') if", "if transformer is not None: x_fold_train = transformer.transform(x_fold_train) estimator.fit(x_fold_train, y_fold_train) y_fold_test_predict = batch_predict(estimator,", "tuning. Ensures everything is cleanly logged, evaluated, and pickled. 
\"\"\" def __init__( self,", "train_index, test_index in kfold.split(x_train, y_train) ) logger.time_log('Cross Validation Complete.\\n') logger.time_log('Training Model...') if fit_increment", "if max_iters is not None: for iter in range(max_iters): x_iter_train, y_iter_train = shuffle(x_train,", "\"<NAME>\" __email__ = \"<EMAIL>\" __copyright__ = \"Copyright 2019, <NAME>\" __license__ = \"Creative Commons", "self, sample=None, random_state=None, test_size=0.20, multiclass=False, record_predict_proba=False, sampling=None, cv=5, verbose=True, transformer=None, fit_increment=None, warm_start=False, max_iters=None,", "batch_predict(results.best_estimator_, x_test) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p' % self.name) test_proba_evaluation_frame =", "Parallel, delayed from sklearn.model_selection import StratifiedKFold, train_test_split from sklearn.utils import shuffle from utility", "x_train) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing Holdout Partition...') y_test_predict = batch_predict(results.best_estimator_,", "sklearn.base import clone from sklearn.externals.joblib import Parallel, delayed from sklearn.model_selection import StratifiedKFold, train_test_split", "x_train, y_train = shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') if self.hyper_parameters is not None:", "-*- \"\"\" A common training and evaluation runner to allow for easy and", "verbose=verbose) else: if transformer is not None: x_fold_train = transformer.transform(x_fold_train) estimator.fit(x_fold_train, y_fold_train) y_fold_test_predict", "record_predict_proba=False, sampling=None): use_project_path() logger = Logger('%s.txt' % self.name) search = BayesSearchCV( self.estimator, self.hyper_parameters.search_space,", "results = search.fit(x_train, y_train) logger.time_log('Search Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(results.best_estimator_, x_train)", "Complete.\\n') if cv is not None: evaluator.evaluate_fold_scores(fold_scores) evaluator.evaluate_classifier_result( self.estimator, test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass", "logger.time_log('Re-Sampling Complete.\\n') logger.time_log('Shuffling Re-Sampled Data.\\n') x_train, y_train = shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n')", "None: self.hyper_parameters.save('%s_params.p' % self.name) self.trained_estimator = self.estimator def run_classification_search_experiment( self, scoring, sample=None, random_state=None,", "4.0 International License\" __version__ = \"1.0\" import pandas as pd from collections import", "scikit learn. Overall a hacky method to allow for incremental training. Really needs", "else: self.estimator.fit(x_train, y_train) logger.time_log('Training Complete.\\n') logger.time_log('Testing Training Partition...') y_train_predict = batch_predict(self.estimator, x_train, transformer=transformer,", "not None: for iter in range(max_iters): x_iter_train, y_iter_train = shuffle(x_train, y_train, random_state=random_state) batch_fit_classifier(self.estimator,", "is cleanly logged, evaluated, and pickled. 
\"\"\" def __init__( self, name, df, target,", "Validation Complete.\\n') logger.time_log('Training Model...') if fit_increment is not None: if max_iters is not", "batch_predict(self.estimator, x_test, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p' % self.name)", "will make use of the a BayesianSearchCV to perform hyperparameter tuning. Ensures everything", "y_train_predict = batch_predict(self.estimator, x_train, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') train_evaluation_frame = EvaluationFrame(y_train, y_train_predict) logger.time_log('Testing", "= data_frame.sample(n=sample, random_state=random_state) x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size) if sampling", "Complete.\\n') logger.time_log('Shuffling Re-Sampled Data.\\n') x_train, y_train = shuffle(x_train, y_train, random_state=random_state) logger.time_log('Shuffling Complete.\\n') logger.time_log('Starting", "clone from sklearn.externals.joblib import Parallel, delayed from sklearn.model_selection import StratifiedKFold, train_test_split from sklearn.utils", "is not None: logger.time_log('Starting Data Re-Sampling...') logger.log('Original Training Shape is %s' % Counter(y_train))", "test_evaluation_frame, train=train_evaluation_frame, test_proba=test_proba_evaluation_frame, multiclass=multiclass ) logger.close() self.hyper_parameters.params = results.best_params_ self.hyper_parameters.save('%s_params.p' % self.name) self.trained_estimator", "n_jobs=-1, n_iter=2, cv=5, verbose=3, multiclass=False, record_predict_proba=False, sampling=None): use_project_path() logger = Logger('%s.txt' % self.name)", "allow for incremental training. 
Really needs to be refactored into a cleaner form.", "= batch_predict(self.estimator, x_test, transformer=transformer, verbose=verbose) logger.time_log('Testing Complete.\\n') test_evaluation_frame = EvaluationFrame(y_test, y_test_predict) test_evaluation_frame.save('%s_predict.p' %", "train_test_split(data_frame, data_frame[self.target], test_size=test_size) if transformer is not None: logger.time_log('Fitting Transformer...') transformer.fit(x_train) logger.time_log('Transformer Fit", "x_fold_train, x_fold_test = x_train.iloc[train_index], x_train.iloc[test_index] else: x_fold_train, x_fold_test = x_train[train_index], x_train[test_index] if hasattr(y_train,", "for train_index, test_index in kfold.split(x_train, y_train) ) logger.time_log('Cross Validation Complete.\\n') logger.time_log('Training Model...') if", "= None if record_predict_proba: logger.time_log('Testing Holdout Partition (probability)...') y_test_predict_proba = batch_predict_proba(self.estimator, x_test, transformer=transformer,", "y_train, train_index, test_index, record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state): \"\"\" This method allows", "<NAME>\" __license__ = \"Creative Commons Attribution-ShareAlike 4.0 International License\" __version__ = \"1.0\" import", "x_new, y_new logger.time_log('Re-Sampling Complete.\\n') logger.time_log('Shuffling Re-Sampled Data.\\n') x_train, y_train = shuffle(x_train, y_train, random_state=random_state)", "% self.name) logger.time_log('Testing Complete.\\n') if cv is not None: evaluator.evaluate_fold_scores(fold_scores) evaluator.evaluate_classifier_result( self.estimator, test_evaluation_frame,", "y_fold_train, y_fold_test = y_train[train_index], y_train[test_index] if fit_increment is not None: if max_iters is" ]
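A minimal usage sketch of the Runner above, under stated assumptions: the module is importable as `runner`, the project's `utility` helpers (Logger, Evaluator, batch_predict, and friends) are on the path, and `data.csv` holds a numeric feature matrix with a `label` target column. None of those names come from the module itself.

# Hedged usage sketch; 'runner' as the module name, 'data.csv', and the 'label'
# column are assumptions for illustration only.
import pandas as pd
from sklearn.linear_model import LogisticRegression

from runner import Runner  # assumed module name for the code above

df = pd.read_csv('data.csv')

experiment = Runner(
    name='logreg_baseline',                      # used to name the log and pickle files
    df=df,
    target='label',                              # assumed target column
    estimator=LogisticRegression(max_iter=1000),
)

# Bare fit: stratified cross-validation of estimator clones in parallel,
# then a final fit and evaluation on the holdout partition.
experiment.run_classification_experiment(cv=5, test_size=0.20, random_state=42)

# Search-based fit instead of a bare fit; requires a hyper_parameters object
# exposing .search_space, .params, and .save(), so it is left commented out here.
# experiment.run_classification_search_experiment(scoring='f1_macro')

Cloning the estimator inside each fold keeps the parallel cross-validation fits independent of the final full-data fit that ultimately populates trained_estimator.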
[ "from almanak.file import compress, decompress, extract, fileinfo __ALL__ = ['compress', 'decompress', 'extract', 'fileinfo']" ]
[ "not self._wlan.ifconfig()[0]=='0.0.0.0': self._state = STATE_WLAN_CONNECTED print(\"wlan connected\") def __updateWlanConnectedState(self): if self._mqtt: print(\"connecting to", "== STATE_WLAN_CONNECTED: self.__updateWlanConnectedState() if self._state == STATE_MQTT_CONNECTING: self.__updateMqttConnectingState() def __connectWlan(self): if self._wlanSsid: print(\"connecting", "from utime import ticks_ms import network import time from umqtt.simple import MQTTClient STATE_DISCONNECTED", "= STATE_DISCONNECTED self._data = {} def configureWlan(self, ssid, password): self._wlanSsid = ssid self._wlanPassword", "STATE_DISCONNECTED: self.__connectWlan() def publish(self, topic, data): # keeping only the latest value self._data[topic]", "self.__flush() def update(self): if self._state > STATE_WLAN_CONNECTING \\ and not self._wlan.isconnected: self._state =", "value self._data[topic] = data self.__flush() def update(self): if self._state > STATE_WLAN_CONNECTING \\ and", "if self._state == STATE_WLAN_CONNECTING: self.__updateWlanConnectingState() if self._state == STATE_WLAN_CONNECTED: self.__updateWlanConnectedState() if self._state ==", "> WLAN_CONNECTION_TIMEOUT_MS: print(\"Could not connect to wlan. Falling back to disconnected state\") self._state", "state\") self._state = STATE_DISCONNECTED elif self._wlan.isconnected() \\ and not self._wlan.ifconfig()[0]=='0.0.0.0': self._state = STATE_WLAN_CONNECTED", "connection failed.\") self._state = STATE_WLAN_CONNECTED else: try: self._mqtt.ping() self._state = STATE_MQTT_CONNECTED self.__flush() print(\"mqtt", "self.__flush() print(\"mqtt connection established\") except Exception as ex: self.__printException(ex) def __flush(self): if self._state", "to mqtt\") self._state = STATE_MQTT_CONNECTING self._mqttConnectingTimestamp = ticks_ms() try: self._mqtt.connect() except Exception as", "def __updateMqttConnectingState(self): if ticks_ms() - self._mqttConnectingTimestamp > MQTT_CONNECTION_TIMEOUT_MS: print(\"MQTT connection failed.\") self._state =", "except Exception as ex: self.__printException(ex) def __updateMqttConnectingState(self): if ticks_ms() - self._mqttConnectingTimestamp > MQTT_CONNECTION_TIMEOUT_MS:", "== STATE_WLAN_CONNECTING: self.__updateWlanConnectingState() if self._state == STATE_WLAN_CONNECTED: self.__updateWlanConnectedState() if self._state == STATE_MQTT_CONNECTING: self.__updateMqttConnectingState()", "as ex: self.__printException(ex) def __updateWlanConnectingState(self): if ticks_ms() - self._wlanConnectingTimestamp > WLAN_CONNECTION_TIMEOUT_MS: print(\"Could not", "self._data[key]) del self._data[key] except Exception as ex: self._state = STATE_WLAN_CONNECTED self.__printException(ex) def __printException(self,", "= network.WLAN(network.STA_IF) self._wlanSsid = None self._wlanPassword = None self._wlanConnectingTimestamp = None self._mqtt =", "self._wlanPassword = None self._wlanConnectingTimestamp = None self._mqtt = None self._mqttConnectingTimestamp = None self._state", "= STATE_WLAN_CONNECTED print(\"wlan connected\") def __updateWlanConnectedState(self): if self._mqtt: print(\"connecting to mqtt\") self._state =", "ticks_ms import network import time from umqtt.simple import MQTTClient STATE_DISCONNECTED = 0 STATE_WLAN_CONNECTING", "None self._wlanPassword = None self._wlanConnectingTimestamp = None self._mqtt = None self._mqttConnectingTimestamp = None", "def __init__(self): self._wlan = network.WLAN(network.STA_IF) self._wlanSsid = None self._wlanPassword = None self._wlanConnectingTimestamp =", "import network 
import time from umqtt.simple import MQTTClient STATE_DISCONNECTED = 0 STATE_WLAN_CONNECTING =", "0, mqttUsername, mqttPassword) def initConnection(self): if self._state == STATE_DISCONNECTED: self.__connectWlan() def publish(self, topic,", "* 1000 MQTT_CONNECTION_TIMEOUT_MS = 30 * 1000 class ConnectionManager: def __init__(self): self._wlan =", "mqttPassword): self._mqtt = MQTTClient(mqttClientId, mqttServer, 0, mqttUsername, mqttPassword) def initConnection(self): if self._state ==", "== STATE_DISCONNECTED: self.__connectWlan() def publish(self, topic, data): # keeping only the latest value", "{} def configureWlan(self, ssid, password): self._wlanSsid = ssid self._wlanPassword = password def configureMqtt(self,", "Falling back to disconnected state\") self._state = STATE_DISCONNECTED elif self._wlan.isconnected() \\ and not", "else: try: self._mqtt.ping() self._state = STATE_MQTT_CONNECTED self.__flush() print(\"mqtt connection established\") except Exception as", "ssid self._wlanPassword = password def configureMqtt(self, mqttClientId, mqttServer, mqttUsername, mqttPassword): self._mqtt = MQTTClient(mqttClientId,", "self._mqtt = None self._mqttConnectingTimestamp = None self._state = STATE_DISCONNECTED self._data = {} def", "def __flush(self): if self._state == STATE_MQTT_CONNECTED: try: for key in list(self._data): self._mqtt.publish(key, self._data[key])", "STATE_DISCONNECTED = 0 STATE_WLAN_CONNECTING = 1 STATE_WLAN_CONNECTED = 2 STATE_MQTT_CONNECTING = 3 STATE_MQTT_CONNECTED", "= STATE_DISCONNECTED if self._state == STATE_WLAN_CONNECTING: self.__updateWlanConnectingState() if self._state == STATE_WLAN_CONNECTED: self.__updateWlanConnectedState() if", "30 * 1000 MQTT_CONNECTION_TIMEOUT_MS = 30 * 1000 class ConnectionManager: def __init__(self): self._wlan", "= password def configureMqtt(self, mqttClientId, mqttServer, mqttUsername, mqttPassword): self._mqtt = MQTTClient(mqttClientId, mqttServer, 0,", "if self._wlanSsid: print(\"connecting to wlan...\") self._wlanConnectingTimestamp = ticks_ms() self._state = STATE_WLAN_CONNECTING try: self._wlan.active(True)", "self._state = STATE_WLAN_CONNECTED self.__printException(ex) def __printException(self, ex): template = \"An exception of type", "\\ and not self._wlan.ifconfig()[0]=='0.0.0.0': self._state = STATE_WLAN_CONNECTED print(\"wlan connected\") def __updateWlanConnectedState(self): if self._mqtt:", "STATE_WLAN_CONNECTING: self.__updateWlanConnectingState() if self._state == STATE_WLAN_CONNECTED: self.__updateWlanConnectedState() if self._state == STATE_MQTT_CONNECTING: self.__updateMqttConnectingState() def", "ex: self.__printException(ex) def __updateMqttConnectingState(self): if ticks_ms() - self._mqttConnectingTimestamp > MQTT_CONNECTION_TIMEOUT_MS: print(\"MQTT connection failed.\")", "not connect to wlan. 
Falling back to disconnected state\") self._state = STATE_DISCONNECTED elif", "except Exception as ex: self.__printException(ex) def __updateWlanConnectingState(self): if ticks_ms() - self._wlanConnectingTimestamp > WLAN_CONNECTION_TIMEOUT_MS:", "data self.__flush() def update(self): if self._state > STATE_WLAN_CONNECTING \\ and not self._wlan.isconnected: self._state", "__updateWlanConnectedState(self): if self._mqtt: print(\"connecting to mqtt\") self._state = STATE_MQTT_CONNECTING self._mqttConnectingTimestamp = ticks_ms() try:", "= data self.__flush() def update(self): if self._state > STATE_WLAN_CONNECTING \\ and not self._wlan.isconnected:", "__init__(self): self._wlan = network.WLAN(network.STA_IF) self._wlanSsid = None self._wlanPassword = None self._wlanConnectingTimestamp = None", "* 1000 class ConnectionManager: def __init__(self): self._wlan = network.WLAN(network.STA_IF) self._wlanSsid = None self._wlanPassword", "self._state = STATE_WLAN_CONNECTING try: self._wlan.active(True) self._wlan.disconnect() self._wlan.connect(self._wlanSsid, self._wlanPassword) except Exception as ex: self.__printException(ex)", "def update(self): if self._state > STATE_WLAN_CONNECTING \\ and not self._wlan.isconnected: self._state = STATE_DISCONNECTED", "= 0 STATE_WLAN_CONNECTING = 1 STATE_WLAN_CONNECTED = 2 STATE_MQTT_CONNECTING = 3 STATE_MQTT_CONNECTED =", "self._mqttConnectingTimestamp = None self._state = STATE_DISCONNECTED self._data = {} def configureWlan(self, ssid, password):", "mqttUsername, mqttPassword) def initConnection(self): if self._state == STATE_DISCONNECTED: self.__connectWlan() def publish(self, topic, data):", "self._wlanConnectingTimestamp > WLAN_CONNECTION_TIMEOUT_MS: print(\"Could not connect to wlan. Falling back to disconnected state\")", "password def configureMqtt(self, mqttClientId, mqttServer, mqttUsername, mqttPassword): self._mqtt = MQTTClient(mqttClientId, mqttServer, 0, mqttUsername,", "connect to wlan. Falling back to disconnected state\") self._state = STATE_DISCONNECTED elif self._wlan.isconnected()", "wlan. 
Falling back to disconnected state\") self._state = STATE_DISCONNECTED elif self._wlan.isconnected() \\ and", "mqtt\") self._state = STATE_MQTT_CONNECTING self._mqttConnectingTimestamp = ticks_ms() try: self._mqtt.connect() except Exception as ex:", "def configureWlan(self, ssid, password): self._wlanSsid = ssid self._wlanPassword = password def configureMqtt(self, mqttClientId,", "STATE_WLAN_CONNECTING = 1 STATE_WLAN_CONNECTED = 2 STATE_MQTT_CONNECTING = 3 STATE_MQTT_CONNECTED = 4 WLAN_CONNECTION_TIMEOUT_MS", "self.__updateWlanConnectedState() if self._state == STATE_MQTT_CONNECTING: self.__updateMqttConnectingState() def __connectWlan(self): if self._wlanSsid: print(\"connecting to wlan...\")", "self._mqttConnectingTimestamp = ticks_ms() try: self._mqtt.connect() except Exception as ex: self.__printException(ex) def __updateMqttConnectingState(self): if", "self._wlanConnectingTimestamp = None self._mqtt = None self._mqttConnectingTimestamp = None self._state = STATE_DISCONNECTED self._data", "self._state == STATE_MQTT_CONNECTING: self.__updateMqttConnectingState() def __connectWlan(self): if self._wlanSsid: print(\"connecting to wlan...\") self._wlanConnectingTimestamp =", "STATE_MQTT_CONNECTED = 4 WLAN_CONNECTION_TIMEOUT_MS = 30 * 1000 MQTT_CONNECTION_TIMEOUT_MS = 30 * 1000", "self._data = {} def configureWlan(self, ssid, password): self._wlanSsid = ssid self._wlanPassword = password", "network import time from umqtt.simple import MQTTClient STATE_DISCONNECTED = 0 STATE_WLAN_CONNECTING = 1", "Exception as ex: self.__printException(ex) def __updateMqttConnectingState(self): if ticks_ms() - self._mqttConnectingTimestamp > MQTT_CONNECTION_TIMEOUT_MS: print(\"MQTT", "mqttPassword) def initConnection(self): if self._state == STATE_DISCONNECTED: self.__connectWlan() def publish(self, topic, data): #", "def initConnection(self): if self._state == STATE_DISCONNECTED: self.__connectWlan() def publish(self, topic, data): # keeping", "if self._state == STATE_WLAN_CONNECTED: self.__updateWlanConnectedState() if self._state == STATE_MQTT_CONNECTING: self.__updateMqttConnectingState() def __connectWlan(self): if", "if ticks_ms() - self._wlanConnectingTimestamp > WLAN_CONNECTION_TIMEOUT_MS: print(\"Could not connect to wlan. Falling back", "= STATE_WLAN_CONNECTED self.__printException(ex) def __printException(self, ex): template = \"An exception of type {0}", "__updateMqttConnectingState(self): if ticks_ms() - self._mqttConnectingTimestamp > MQTT_CONNECTION_TIMEOUT_MS: print(\"MQTT connection failed.\") self._state = STATE_WLAN_CONNECTED", "1 STATE_WLAN_CONNECTED = 2 STATE_MQTT_CONNECTING = 3 STATE_MQTT_CONNECTED = 4 WLAN_CONNECTION_TIMEOUT_MS = 30", "self._wlanPassword) except Exception as ex: self.__printException(ex) def __updateWlanConnectingState(self): if ticks_ms() - self._wlanConnectingTimestamp >", "self.__printException(ex) def __printException(self, ex): template = \"An exception of type {0} occurred. 
Arguments:\\n{1!r}\"", "configureWlan(self, ssid, password): self._wlanSsid = ssid self._wlanPassword = password def configureMqtt(self, mqttClientId, mqttServer,", "self._state = STATE_DISCONNECTED self._data = {} def configureWlan(self, ssid, password): self._wlanSsid = ssid", "update(self): if self._state > STATE_WLAN_CONNECTING \\ and not self._wlan.isconnected: self._state = STATE_DISCONNECTED if", "self._mqttConnectingTimestamp > MQTT_CONNECTION_TIMEOUT_MS: print(\"MQTT connection failed.\") self._state = STATE_WLAN_CONNECTED else: try: self._mqtt.ping() self._state", "def configureMqtt(self, mqttClientId, mqttServer, mqttUsername, mqttPassword): self._mqtt = MQTTClient(mqttClientId, mqttServer, 0, mqttUsername, mqttPassword)", "and not self._wlan.ifconfig()[0]=='0.0.0.0': self._state = STATE_WLAN_CONNECTED print(\"wlan connected\") def __updateWlanConnectedState(self): if self._mqtt: print(\"connecting", "failed.\") self._state = STATE_WLAN_CONNECTED else: try: self._mqtt.ping() self._state = STATE_MQTT_CONNECTED self.__flush() print(\"mqtt connection", "= STATE_MQTT_CONNECTED self.__flush() print(\"mqtt connection established\") except Exception as ex: self.__printException(ex) def __flush(self):", "= 30 * 1000 MQTT_CONNECTION_TIMEOUT_MS = 30 * 1000 class ConnectionManager: def __init__(self):", "__printException(self, ex): template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\" message =", "STATE_DISCONNECTED self._data = {} def configureWlan(self, ssid, password): self._wlanSsid = ssid self._wlanPassword =", "password): self._wlanSsid = ssid self._wlanPassword = password def configureMqtt(self, mqttClientId, mqttServer, mqttUsername, mqttPassword):", "__flush(self): if self._state == STATE_MQTT_CONNECTED: try: for key in list(self._data): self._mqtt.publish(key, self._data[key]) del", "> STATE_WLAN_CONNECTING \\ and not self._wlan.isconnected: self._state = STATE_DISCONNECTED if self._state == STATE_WLAN_CONNECTING:", "= 30 * 1000 class ConnectionManager: def __init__(self): self._wlan = network.WLAN(network.STA_IF) self._wlanSsid =", "in list(self._data): self._mqtt.publish(key, self._data[key]) del self._data[key] except Exception as ex: self._state = STATE_WLAN_CONNECTED", "1000 MQTT_CONNECTION_TIMEOUT_MS = 30 * 1000 class ConnectionManager: def __init__(self): self._wlan = network.WLAN(network.STA_IF)", "# keeping only the latest value self._data[topic] = data self.__flush() def update(self): if", "elif self._wlan.isconnected() \\ and not self._wlan.ifconfig()[0]=='0.0.0.0': self._state = STATE_WLAN_CONNECTED print(\"wlan connected\") def __updateWlanConnectedState(self):", "print(\"MQTT connection failed.\") self._state = STATE_WLAN_CONNECTED else: try: self._mqtt.ping() self._state = STATE_MQTT_CONNECTED self.__flush()", "back to disconnected state\") self._state = STATE_DISCONNECTED elif self._wlan.isconnected() \\ and not self._wlan.ifconfig()[0]=='0.0.0.0':", "self._state = STATE_WLAN_CONNECTED print(\"wlan connected\") def __updateWlanConnectedState(self): if self._mqtt: print(\"connecting to mqtt\") self._state", "ticks_ms() try: self._mqtt.connect() except Exception as ex: self.__printException(ex) def __updateMqttConnectingState(self): if ticks_ms() -", "self._wlanSsid: print(\"connecting to wlan...\") self._wlanConnectingTimestamp = ticks_ms() self._state = STATE_WLAN_CONNECTING try: self._wlan.active(True) self._wlan.disconnect()", "self._mqtt.publish(key, self._data[key]) del self._data[key] except Exception as ex: self._state = STATE_WLAN_CONNECTED 
self.__printException(ex) def", "MQTTClient(mqttClientId, mqttServer, 0, mqttUsername, mqttPassword) def initConnection(self): if self._state == STATE_DISCONNECTED: self.__connectWlan() def", "WLAN_CONNECTION_TIMEOUT_MS = 30 * 1000 MQTT_CONNECTION_TIMEOUT_MS = 30 * 1000 class ConnectionManager: def", "self._wlanSsid = None self._wlanPassword = None self._wlanConnectingTimestamp = None self._mqtt = None self._mqttConnectingTimestamp", "ex: self.__printException(ex) def __updateWlanConnectingState(self): if ticks_ms() - self._wlanConnectingTimestamp > WLAN_CONNECTION_TIMEOUT_MS: print(\"Could not connect", "self._wlan.isconnected: self._state = STATE_DISCONNECTED if self._state == STATE_WLAN_CONNECTING: self.__updateWlanConnectingState() if self._state == STATE_WLAN_CONNECTED:", "mqttClientId, mqttServer, mqttUsername, mqttPassword): self._mqtt = MQTTClient(mqttClientId, mqttServer, 0, mqttUsername, mqttPassword) def initConnection(self):", "def __printException(self, ex): template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\" message", "wlan...\") self._wlanConnectingTimestamp = ticks_ms() self._state = STATE_WLAN_CONNECTING try: self._wlan.active(True) self._wlan.disconnect() self._wlan.connect(self._wlanSsid, self._wlanPassword) except", "- self._wlanConnectingTimestamp > WLAN_CONNECTION_TIMEOUT_MS: print(\"Could not connect to wlan. Falling back to disconnected", "1000 class ConnectionManager: def __init__(self): self._wlan = network.WLAN(network.STA_IF) self._wlanSsid = None self._wlanPassword =", "= None self._state = STATE_DISCONNECTED self._data = {} def configureWlan(self, ssid, password): self._wlanSsid", "initConnection(self): if self._state == STATE_DISCONNECTED: self.__connectWlan() def publish(self, topic, data): # keeping only", "STATE_DISCONNECTED elif self._wlan.isconnected() \\ and not self._wlan.ifconfig()[0]=='0.0.0.0': self._state = STATE_WLAN_CONNECTED print(\"wlan connected\") def", "del self._data[key] except Exception as ex: self._state = STATE_WLAN_CONNECTED self.__printException(ex) def __printException(self, ex):", "not self._wlan.isconnected: self._state = STATE_DISCONNECTED if self._state == STATE_WLAN_CONNECTING: self.__updateWlanConnectingState() if self._state ==", "self.__printException(ex) def __updateMqttConnectingState(self): if ticks_ms() - self._mqttConnectingTimestamp > MQTT_CONNECTION_TIMEOUT_MS: print(\"MQTT connection failed.\") self._state", "= None self._mqtt = None self._mqttConnectingTimestamp = None self._state = STATE_DISCONNECTED self._data =", "print(\"connecting to mqtt\") self._state = STATE_MQTT_CONNECTING self._mqttConnectingTimestamp = ticks_ms() try: self._mqtt.connect() except Exception", "disconnected state\") self._state = STATE_DISCONNECTED elif self._wlan.isconnected() \\ and not self._wlan.ifconfig()[0]=='0.0.0.0': self._state =", "ConnectionManager: def __init__(self): self._wlan = network.WLAN(network.STA_IF) self._wlanSsid = None self._wlanPassword = None self._wlanConnectingTimestamp", "2 STATE_MQTT_CONNECTING = 3 STATE_MQTT_CONNECTED = 4 WLAN_CONNECTION_TIMEOUT_MS = 30 * 1000 MQTT_CONNECTION_TIMEOUT_MS", "= 2 STATE_MQTT_CONNECTING = 3 STATE_MQTT_CONNECTED = 4 WLAN_CONNECTION_TIMEOUT_MS = 30 * 1000", "STATE_DISCONNECTED if self._state == STATE_WLAN_CONNECTING: self.__updateWlanConnectingState() if self._state == STATE_WLAN_CONNECTED: self.__updateWlanConnectedState() if self._state", "self._state = STATE_DISCONNECTED elif self._wlan.isconnected() \\ and not self._wlan.ifconfig()[0]=='0.0.0.0': self._state = 
STATE_WLAN_CONNECTED print(\"wlan", "ex): template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\" message = template.format(type(ex).__name__,", "mqttServer, mqttUsername, mqttPassword): self._mqtt = MQTTClient(mqttClientId, mqttServer, 0, mqttUsername, mqttPassword) def initConnection(self): if", "STATE_MQTT_CONNECTING = 3 STATE_MQTT_CONNECTED = 4 WLAN_CONNECTION_TIMEOUT_MS = 30 * 1000 MQTT_CONNECTION_TIMEOUT_MS =", "None self._mqttConnectingTimestamp = None self._state = STATE_DISCONNECTED self._data = {} def configureWlan(self, ssid,", "MQTTClient STATE_DISCONNECTED = 0 STATE_WLAN_CONNECTING = 1 STATE_WLAN_CONNECTED = 2 STATE_MQTT_CONNECTING = 3", "ticks_ms() - self._mqttConnectingTimestamp > MQTT_CONNECTION_TIMEOUT_MS: print(\"MQTT connection failed.\") self._state = STATE_WLAN_CONNECTED else: try:", "configureMqtt(self, mqttClientId, mqttServer, mqttUsername, mqttPassword): self._mqtt = MQTTClient(mqttClientId, mqttServer, 0, mqttUsername, mqttPassword) def", "only the latest value self._data[topic] = data self.__flush() def update(self): if self._state >", "key in list(self._data): self._mqtt.publish(key, self._data[key]) del self._data[key] except Exception as ex: self._state =", "print(\"wlan connected\") def __updateWlanConnectedState(self): if self._mqtt: print(\"connecting to mqtt\") self._state = STATE_MQTT_CONNECTING self._mqttConnectingTimestamp", "self.__connectWlan() def publish(self, topic, data): # keeping only the latest value self._data[topic] =", "utime import ticks_ms import network import time from umqtt.simple import MQTTClient STATE_DISCONNECTED =", "4 WLAN_CONNECTION_TIMEOUT_MS = 30 * 1000 MQTT_CONNECTION_TIMEOUT_MS = 30 * 1000 class ConnectionManager:", "self._wlan.active(True) self._wlan.disconnect() self._wlan.connect(self._wlanSsid, self._wlanPassword) except Exception as ex: self.__printException(ex) def __updateWlanConnectingState(self): if ticks_ms()", "as ex: self.__printException(ex) def __flush(self): if self._state == STATE_MQTT_CONNECTED: try: for key in", "try: for key in list(self._data): self._mqtt.publish(key, self._data[key]) del self._data[key] except Exception as ex:", "except Exception as ex: self._state = STATE_WLAN_CONNECTED self.__printException(ex) def __printException(self, ex): template =", "try: self._mqtt.ping() self._state = STATE_MQTT_CONNECTED self.__flush() print(\"mqtt connection established\") except Exception as ex:", "STATE_WLAN_CONNECTED else: try: self._mqtt.ping() self._state = STATE_MQTT_CONNECTED self.__flush() print(\"mqtt connection established\") except Exception", "connected\") def __updateWlanConnectedState(self): if self._mqtt: print(\"connecting to mqtt\") self._state = STATE_MQTT_CONNECTING self._mqttConnectingTimestamp =", "def publish(self, topic, data): # keeping only the latest value self._data[topic] = data", "if ticks_ms() - self._mqttConnectingTimestamp > MQTT_CONNECTION_TIMEOUT_MS: print(\"MQTT connection failed.\") self._state = STATE_WLAN_CONNECTED else:", "0 STATE_WLAN_CONNECTING = 1 STATE_WLAN_CONNECTED = 2 STATE_MQTT_CONNECTING = 3 STATE_MQTT_CONNECTED = 4", "= ssid self._wlanPassword = password def configureMqtt(self, mqttClientId, mqttServer, mqttUsername, mqttPassword): self._mqtt =", "ticks_ms() - self._wlanConnectingTimestamp > WLAN_CONNECTION_TIMEOUT_MS: print(\"Could not connect to wlan. 
Falling back to", "= None self._wlanPassword = None self._wlanConnectingTimestamp = None self._mqtt = None self._mqttConnectingTimestamp =", "= ticks_ms() try: self._mqtt.connect() except Exception as ex: self.__printException(ex) def __updateMqttConnectingState(self): if ticks_ms()", "from umqtt.simple import MQTTClient STATE_DISCONNECTED = 0 STATE_WLAN_CONNECTING = 1 STATE_WLAN_CONNECTED = 2", "try: self._mqtt.connect() except Exception as ex: self.__printException(ex) def __updateMqttConnectingState(self): if ticks_ms() - self._mqttConnectingTimestamp", "= STATE_MQTT_CONNECTING self._mqttConnectingTimestamp = ticks_ms() try: self._mqtt.connect() except Exception as ex: self.__printException(ex) def", "= 3 STATE_MQTT_CONNECTED = 4 WLAN_CONNECTION_TIMEOUT_MS = 30 * 1000 MQTT_CONNECTION_TIMEOUT_MS = 30", "= STATE_WLAN_CONNECTING try: self._wlan.active(True) self._wlan.disconnect() self._wlan.connect(self._wlanSsid, self._wlanPassword) except Exception as ex: self.__printException(ex) def", "self._wlan.ifconfig()[0]=='0.0.0.0': self._state = STATE_WLAN_CONNECTED print(\"wlan connected\") def __updateWlanConnectedState(self): if self._mqtt: print(\"connecting to mqtt\")", "def __updateWlanConnectedState(self): if self._mqtt: print(\"connecting to mqtt\") self._state = STATE_MQTT_CONNECTING self._mqttConnectingTimestamp = ticks_ms()", "STATE_MQTT_CONNECTED: try: for key in list(self._data): self._mqtt.publish(key, self._data[key]) del self._data[key] except Exception as", "umqtt.simple import MQTTClient STATE_DISCONNECTED = 0 STATE_WLAN_CONNECTING = 1 STATE_WLAN_CONNECTED = 2 STATE_MQTT_CONNECTING", "Exception as ex: self._state = STATE_WLAN_CONNECTED self.__printException(ex) def __printException(self, ex): template = \"An", "- self._mqttConnectingTimestamp > MQTT_CONNECTION_TIMEOUT_MS: print(\"MQTT connection failed.\") self._state = STATE_WLAN_CONNECTED else: try: self._mqtt.ping()", "def __connectWlan(self): if self._wlanSsid: print(\"connecting to wlan...\") self._wlanConnectingTimestamp = ticks_ms() self._state = STATE_WLAN_CONNECTING", "self._wlanConnectingTimestamp = ticks_ms() self._state = STATE_WLAN_CONNECTING try: self._wlan.active(True) self._wlan.disconnect() self._wlan.connect(self._wlanSsid, self._wlanPassword) except Exception", "ssid, password): self._wlanSsid = ssid self._wlanPassword = password def configureMqtt(self, mqttClientId, mqttServer, mqttUsername,", "the latest value self._data[topic] = data self.__flush() def update(self): if self._state > STATE_WLAN_CONNECTING", "self._wlan.disconnect() self._wlan.connect(self._wlanSsid, self._wlanPassword) except Exception as ex: self.__printException(ex) def __updateWlanConnectingState(self): if ticks_ms() -", "None self._mqtt = None self._mqttConnectingTimestamp = None self._state = STATE_DISCONNECTED self._data = {}", "MQTT_CONNECTION_TIMEOUT_MS = 30 * 1000 class ConnectionManager: def __init__(self): self._wlan = network.WLAN(network.STA_IF) self._wlanSsid", "self._mqtt.ping() self._state = STATE_MQTT_CONNECTED self.__flush() print(\"mqtt connection established\") except Exception as ex: self.__printException(ex)", "self._state = STATE_MQTT_CONNECTED self.__flush() print(\"mqtt connection established\") except Exception as ex: self.__printException(ex) def", "= 4 WLAN_CONNECTION_TIMEOUT_MS = 30 * 1000 MQTT_CONNECTION_TIMEOUT_MS = 30 * 1000 class", "and not self._wlan.isconnected: self._state = STATE_DISCONNECTED if self._state == STATE_WLAN_CONNECTING: self.__updateWlanConnectingState() if self._state", "self._data[topic] 
= data self.__flush() def update(self): if self._state > STATE_WLAN_CONNECTING \\ and not", "self._state = STATE_MQTT_CONNECTING self._mqttConnectingTimestamp = ticks_ms() try: self._mqtt.connect() except Exception as ex: self.__printException(ex)", "Exception as ex: self.__printException(ex) def __flush(self): if self._state == STATE_MQTT_CONNECTED: try: for key", "to wlan...\") self._wlanConnectingTimestamp = ticks_ms() self._state = STATE_WLAN_CONNECTING try: self._wlan.active(True) self._wlan.disconnect() self._wlan.connect(self._wlanSsid, self._wlanPassword)", "self._mqtt: print(\"connecting to mqtt\") self._state = STATE_MQTT_CONNECTING self._mqttConnectingTimestamp = ticks_ms() try: self._mqtt.connect() except", "list(self._data): self._mqtt.publish(key, self._data[key]) del self._data[key] except Exception as ex: self._state = STATE_WLAN_CONNECTED self.__printException(ex)", "if self._state == STATE_MQTT_CONNECTED: try: for key in list(self._data): self._mqtt.publish(key, self._data[key]) del self._data[key]", "Exception as ex: self.__printException(ex) def __updateWlanConnectingState(self): if ticks_ms() - self._wlanConnectingTimestamp > WLAN_CONNECTION_TIMEOUT_MS: print(\"Could", "= None self._wlanConnectingTimestamp = None self._mqtt = None self._mqttConnectingTimestamp = None self._state =", "self._state == STATE_DISCONNECTED: self.__connectWlan() def publish(self, topic, data): # keeping only the latest", "= STATE_DISCONNECTED elif self._wlan.isconnected() \\ and not self._wlan.ifconfig()[0]=='0.0.0.0': self._state = STATE_WLAN_CONNECTED print(\"wlan connected\")", "self._wlan.isconnected() \\ and not self._wlan.ifconfig()[0]=='0.0.0.0': self._state = STATE_WLAN_CONNECTED print(\"wlan connected\") def __updateWlanConnectedState(self): if", "STATE_WLAN_CONNECTED print(\"wlan connected\") def __updateWlanConnectedState(self): if self._mqtt: print(\"connecting to mqtt\") self._state = STATE_MQTT_CONNECTING", "for key in list(self._data): self._mqtt.publish(key, self._data[key]) del self._data[key] except Exception as ex: self._state", "as ex: self._state = STATE_WLAN_CONNECTED self.__printException(ex) def __printException(self, ex): template = \"An exception", "data): # keeping only the latest value self._data[topic] = data self.__flush() def update(self):", "if self._state > STATE_WLAN_CONNECTING \\ and not self._wlan.isconnected: self._state = STATE_DISCONNECTED if self._state", "> MQTT_CONNECTION_TIMEOUT_MS: print(\"MQTT connection failed.\") self._state = STATE_WLAN_CONNECTED else: try: self._mqtt.ping() self._state =", "== STATE_MQTT_CONNECTED: try: for key in list(self._data): self._mqtt.publish(key, self._data[key]) del self._data[key] except Exception", "3 STATE_MQTT_CONNECTED = 4 WLAN_CONNECTION_TIMEOUT_MS = 30 * 1000 MQTT_CONNECTION_TIMEOUT_MS = 30 *", "import MQTTClient STATE_DISCONNECTED = 0 STATE_WLAN_CONNECTING = 1 STATE_WLAN_CONNECTED = 2 STATE_MQTT_CONNECTING =", "STATE_WLAN_CONNECTING try: self._wlan.active(True) self._wlan.disconnect() self._wlan.connect(self._wlanSsid, self._wlanPassword) except Exception as ex: self.__printException(ex) def __updateWlanConnectingState(self):", "try: self._wlan.active(True) self._wlan.disconnect() self._wlan.connect(self._wlanSsid, self._wlanPassword) except Exception as ex: self.__printException(ex) def __updateWlanConnectingState(self): if", "= MQTTClient(mqttClientId, mqttServer, 0, mqttUsername, mqttPassword) def initConnection(self): if self._state == STATE_DISCONNECTED: self.__connectWlan()", "mqttServer, 0, 
mqttUsername, mqttPassword) def initConnection(self): if self._state == STATE_DISCONNECTED: self.__connectWlan() def publish(self,", "STATE_MQTT_CONNECTED self.__flush() print(\"mqtt connection established\") except Exception as ex: self.__printException(ex) def __flush(self): if", "def __updateWlanConnectingState(self): if ticks_ms() - self._wlanConnectingTimestamp > WLAN_CONNECTION_TIMEOUT_MS: print(\"Could not connect to wlan.", "import ticks_ms import network import time from umqtt.simple import MQTTClient STATE_DISCONNECTED = 0", "connection established\") except Exception as ex: self.__printException(ex) def __flush(self): if self._state == STATE_MQTT_CONNECTED:", "established\") except Exception as ex: self.__printException(ex) def __flush(self): if self._state == STATE_MQTT_CONNECTED: try:", "ex: self.__printException(ex) def __flush(self): if self._state == STATE_MQTT_CONNECTED: try: for key in list(self._data):", "self.__printException(ex) def __flush(self): if self._state == STATE_MQTT_CONNECTED: try: for key in list(self._data): self._mqtt.publish(key,", "30 * 1000 class ConnectionManager: def __init__(self): self._wlan = network.WLAN(network.STA_IF) self._wlanSsid = None", "= \"An exception of type {0} occurred. Arguments:\\n{1!r}\" message = template.format(type(ex).__name__, ex.args) print(message)", "if self._state == STATE_DISCONNECTED: self.__connectWlan() def publish(self, topic, data): # keeping only the", "STATE_MQTT_CONNECTING self._mqttConnectingTimestamp = ticks_ms() try: self._mqtt.connect() except Exception as ex: self.__printException(ex) def __updateMqttConnectingState(self):", "= None self._mqttConnectingTimestamp = None self._state = STATE_DISCONNECTED self._data = {} def configureWlan(self,", "to wlan. Falling back to disconnected state\") self._state = STATE_DISCONNECTED elif self._wlan.isconnected() \\", "template = \"An exception of type {0} occurred. 
Arguments:\\n{1!r}\" message = template.format(type(ex).__name__, ex.args)", "self._wlan.connect(self._wlanSsid, self._wlanPassword) except Exception as ex: self.__printException(ex) def __updateWlanConnectingState(self): if ticks_ms() - self._wlanConnectingTimestamp", "= {} def configureWlan(self, ssid, password): self._wlanSsid = ssid self._wlanPassword = password def", "print(\"connecting to wlan...\") self._wlanConnectingTimestamp = ticks_ms() self._state = STATE_WLAN_CONNECTING try: self._wlan.active(True) self._wlan.disconnect() self._wlan.connect(self._wlanSsid,", "= ticks_ms() self._state = STATE_WLAN_CONNECTING try: self._wlan.active(True) self._wlan.disconnect() self._wlan.connect(self._wlanSsid, self._wlanPassword) except Exception as", "self._wlan = network.WLAN(network.STA_IF) self._wlanSsid = None self._wlanPassword = None self._wlanConnectingTimestamp = None self._mqtt", "self._mqtt.connect() except Exception as ex: self.__printException(ex) def __updateMqttConnectingState(self): if ticks_ms() - self._mqttConnectingTimestamp >", "None self._state = STATE_DISCONNECTED self._data = {} def configureWlan(self, ssid, password): self._wlanSsid =", "\\ and not self._wlan.isconnected: self._state = STATE_DISCONNECTED if self._state == STATE_WLAN_CONNECTING: self.__updateWlanConnectingState() if", "ticks_ms() self._state = STATE_WLAN_CONNECTING try: self._wlan.active(True) self._wlan.disconnect() self._wlan.connect(self._wlanSsid, self._wlanPassword) except Exception as ex:", "ex: self._state = STATE_WLAN_CONNECTED self.__printException(ex) def __printException(self, ex): template = \"An exception of", "if self._mqtt: print(\"connecting to mqtt\") self._state = STATE_MQTT_CONNECTING self._mqttConnectingTimestamp = ticks_ms() try: self._mqtt.connect()", "time from umqtt.simple import MQTTClient STATE_DISCONNECTED = 0 STATE_WLAN_CONNECTING = 1 STATE_WLAN_CONNECTED =", "self._state = STATE_DISCONNECTED if self._state == STATE_WLAN_CONNECTING: self.__updateWlanConnectingState() if self._state == STATE_WLAN_CONNECTED: self.__updateWlanConnectedState()", "self._data[key] except Exception as ex: self._state = STATE_WLAN_CONNECTED self.__printException(ex) def __printException(self, ex): template", "== STATE_MQTT_CONNECTING: self.__updateMqttConnectingState() def __connectWlan(self): if self._wlanSsid: print(\"connecting to wlan...\") self._wlanConnectingTimestamp = ticks_ms()", "STATE_MQTT_CONNECTING: self.__updateMqttConnectingState() def __connectWlan(self): if self._wlanSsid: print(\"connecting to wlan...\") self._wlanConnectingTimestamp = ticks_ms() self._state", "self.__updateWlanConnectingState() if self._state == STATE_WLAN_CONNECTED: self.__updateWlanConnectedState() if self._state == STATE_MQTT_CONNECTING: self.__updateMqttConnectingState() def __connectWlan(self):", "self._wlanSsid = ssid self._wlanPassword = password def configureMqtt(self, mqttClientId, mqttServer, mqttUsername, mqttPassword): self._mqtt", "__updateWlanConnectingState(self): if ticks_ms() - self._wlanConnectingTimestamp > WLAN_CONNECTION_TIMEOUT_MS: print(\"Could not connect to wlan. 
Falling", "print(\"mqtt connection established\") except Exception as ex: self.__printException(ex) def __flush(self): if self._state ==", "if self._state == STATE_MQTT_CONNECTING: self.__updateMqttConnectingState() def __connectWlan(self): if self._wlanSsid: print(\"connecting to wlan...\") self._wlanConnectingTimestamp", "latest value self._data[topic] = data self.__flush() def update(self): if self._state > STATE_WLAN_CONNECTING \\", "print(\"Could not connect to wlan. Falling back to disconnected state\") self._state = STATE_DISCONNECTED", "network.WLAN(network.STA_IF) self._wlanSsid = None self._wlanPassword = None self._wlanConnectingTimestamp = None self._mqtt = None", "mqttUsername, mqttPassword): self._mqtt = MQTTClient(mqttClientId, mqttServer, 0, mqttUsername, mqttPassword) def initConnection(self): if self._state", "self._mqtt = MQTTClient(mqttClientId, mqttServer, 0, mqttUsername, mqttPassword) def initConnection(self): if self._state == STATE_DISCONNECTED:", "WLAN_CONNECTION_TIMEOUT_MS: print(\"Could not connect to wlan. Falling back to disconnected state\") self._state =", "= STATE_WLAN_CONNECTED else: try: self._mqtt.ping() self._state = STATE_MQTT_CONNECTED self.__flush() print(\"mqtt connection established\") except", "self._state == STATE_WLAN_CONNECTING: self.__updateWlanConnectingState() if self._state == STATE_WLAN_CONNECTED: self.__updateWlanConnectedState() if self._state == STATE_MQTT_CONNECTING:", "None self._wlanConnectingTimestamp = None self._mqtt = None self._mqttConnectingTimestamp = None self._state = STATE_DISCONNECTED", "self._state == STATE_WLAN_CONNECTED: self.__updateWlanConnectedState() if self._state == STATE_MQTT_CONNECTING: self.__updateMqttConnectingState() def __connectWlan(self): if self._wlanSsid:", "self._wlanPassword = password def configureMqtt(self, mqttClientId, mqttServer, mqttUsername, mqttPassword): self._mqtt = MQTTClient(mqttClientId, mqttServer,", "topic, data): # keeping only the latest value self._data[topic] = data self.__flush() def", "__connectWlan(self): if self._wlanSsid: print(\"connecting to wlan...\") self._wlanConnectingTimestamp = ticks_ms() self._state = STATE_WLAN_CONNECTING try:", "MQTT_CONNECTION_TIMEOUT_MS: print(\"MQTT connection failed.\") self._state = STATE_WLAN_CONNECTED else: try: self._mqtt.ping() self._state = STATE_MQTT_CONNECTED", "self.__updateMqttConnectingState() def __connectWlan(self): if self._wlanSsid: print(\"connecting to wlan...\") self._wlanConnectingTimestamp = ticks_ms() self._state =", "class ConnectionManager: def __init__(self): self._wlan = network.WLAN(network.STA_IF) self._wlanSsid = None self._wlanPassword = None", "except Exception as ex: self.__printException(ex) def __flush(self): if self._state == STATE_MQTT_CONNECTED: try: for", "STATE_WLAN_CONNECTING \\ and not self._wlan.isconnected: self._state = STATE_DISCONNECTED if self._state == STATE_WLAN_CONNECTING: self.__updateWlanConnectingState()", "STATE_WLAN_CONNECTED: self.__updateWlanConnectedState() if self._state == STATE_MQTT_CONNECTING: self.__updateMqttConnectingState() def __connectWlan(self): if self._wlanSsid: print(\"connecting to", "import time from umqtt.simple import MQTTClient STATE_DISCONNECTED = 0 STATE_WLAN_CONNECTING = 1 STATE_WLAN_CONNECTED", "keeping only the latest value self._data[topic] = data self.__flush() def update(self): if self._state", "publish(self, topic, data): # keeping only the latest value self._data[topic] = data self.__flush()", "STATE_WLAN_CONNECTED self.__printException(ex) def 
__printException(self, ex): template = \"An exception of type {0} occurred.", "self._state == STATE_MQTT_CONNECTED: try: for key in list(self._data): self._mqtt.publish(key, self._data[key]) del self._data[key] except", "to disconnected state\") self._state = STATE_DISCONNECTED elif self._wlan.isconnected() \\ and not self._wlan.ifconfig()[0]=='0.0.0.0': self._state", "as ex: self.__printException(ex) def __updateMqttConnectingState(self): if ticks_ms() - self._mqttConnectingTimestamp > MQTT_CONNECTION_TIMEOUT_MS: print(\"MQTT connection", "self._state = STATE_WLAN_CONNECTED else: try: self._mqtt.ping() self._state = STATE_MQTT_CONNECTED self.__flush() print(\"mqtt connection established\")", "self.__printException(ex) def __updateWlanConnectingState(self): if ticks_ms() - self._wlanConnectingTimestamp > WLAN_CONNECTION_TIMEOUT_MS: print(\"Could not connect to", "= 1 STATE_WLAN_CONNECTED = 2 STATE_MQTT_CONNECTING = 3 STATE_MQTT_CONNECTED = 4 WLAN_CONNECTION_TIMEOUT_MS =", "self._state > STATE_WLAN_CONNECTING \\ and not self._wlan.isconnected: self._state = STATE_DISCONNECTED if self._state ==", "STATE_WLAN_CONNECTED = 2 STATE_MQTT_CONNECTING = 3 STATE_MQTT_CONNECTED = 4 WLAN_CONNECTION_TIMEOUT_MS = 30 *" ]
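# A minimal usage sketch for the ConnectionManager above, assuming it is driven from
# a MicroPython main loop; the SSID, credentials, broker host, and topic below are
# placeholders, not values from the original project.
manager = ConnectionManager()
manager.configureWlan("my-ssid", "my-password")
manager.configureMqtt("sensor-1", "broker.example.com", "user", "pass")
manager.initConnection()

while True:
    manager.publish(b"sensors/temperature", b"21.5")  # queued until MQTT is connected
    manager.update()                                  # drives the WLAN/MQTT state machine
    time.sleep(1)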
[ "distutils.core distutils.core.setup( name='Ymir', author='<NAME>', author_email='<EMAIL>', version='0.1dev', packages=['ymir', ], license='LICENSE.txt', url='http://pypi.python.org/pypi/Ymir/', description='script to manage", "<reponame>karlcow/ymir<filename>setup.py<gh_stars>1-10 import distutils.core distutils.core.setup( name='Ymir', author='<NAME>', author_email='<EMAIL>', version='0.1dev', packages=['ymir', ], license='LICENSE.txt', url='http://pypi.python.org/pypi/Ymir/', description='script", "author='<NAME>', author_email='<EMAIL>', version='0.1dev', packages=['ymir', ], license='LICENSE.txt', url='http://pypi.python.org/pypi/Ymir/', description='script to manage La Grange blog", "author_email='<EMAIL>', version='0.1dev', packages=['ymir', ], license='LICENSE.txt', url='http://pypi.python.org/pypi/Ymir/', description='script to manage La Grange blog http://www.la-grange.net/',", "packages=['ymir', ], license='LICENSE.txt', url='http://pypi.python.org/pypi/Ymir/', description='script to manage La Grange blog http://www.la-grange.net/', long_description=open('README.txt').read(), )", "name='Ymir', author='<NAME>', author_email='<EMAIL>', version='0.1dev', packages=['ymir', ], license='LICENSE.txt', url='http://pypi.python.org/pypi/Ymir/', description='script to manage La Grange", "distutils.core.setup( name='Ymir', author='<NAME>', author_email='<EMAIL>', version='0.1dev', packages=['ymir', ], license='LICENSE.txt', url='http://pypi.python.org/pypi/Ymir/', description='script to manage La", "import distutils.core distutils.core.setup( name='Ymir', author='<NAME>', author_email='<EMAIL>', version='0.1dev', packages=['ymir', ], license='LICENSE.txt', url='http://pypi.python.org/pypi/Ymir/', description='script to", "version='0.1dev', packages=['ymir', ], license='LICENSE.txt', url='http://pypi.python.org/pypi/Ymir/', description='script to manage La Grange blog http://www.la-grange.net/', long_description=open('README.txt').read()," ]
[ "<reponame>yq-wen/probabilistic_nlg config_fingerprint = None config = None log_writer = None isTrain = True" ]
[ "noinspection PyUnresolvedReferences from db import order # noinspection PyUnresolvedReferences from db import user", "<reponame>rcastleman/twilio-and-sendgrid-python-course<gh_stars>10-100 # noinspection PyUnresolvedReferences from db import order # noinspection PyUnresolvedReferences from db", "# noinspection PyUnresolvedReferences from db import order # noinspection PyUnresolvedReferences from db import" ]
[ "departmental dropdown choices. class Department(models.Model): name = models.CharField(max_length=255, unique=True) def __str__(self): return self.name", "logging.getLogger(__name__) # PSU Mailcode class Mailcode(models.Model): code = models.CharField(max_length=40) description = models.CharField(max_length=255) def", "return self.name # Directory information for users with psuPublish = y. Upstream logic", "be rendered and editable. class DirectoryInformation(models.Model): COMPANY_CHOICES = ( ('Portland State University', 'Portland", "and editable. class DirectoryInformation(models.Model): COMPANY_CHOICES = ( ('Portland State University', 'Portland State University'),", "return self.psu_uuid # Maintenance notice. class MaintenanceNotice(models.Model): start_display = models.DateTimeField() end_display = models.DateTimeField()", "= y. Upstream logic assumes that # all fields except psu_uuid are to", "default=\"97201\") def __str__(self): # pragma: no cover return self.psu_uuid # Password reset contact", "psu_uuid = models.CharField(unique=True, max_length=36, primary_key=True, editable=False) company = models.CharField(max_length=50, choices=COMPANY_CHOICES, null=True, blank=True, default=\"Portland", "= models.CharField(max_length=32, null=True, blank=True, validators=[validate_psu_phone]) fax = models.CharField(max_length=32, null=True, blank=True, validators=[validate_psu_phone]) job_title =", "= models.CharField(max_length=128, null=True, blank=True) department = models.ForeignKey(Department, null=True, blank=True) office_building = models.ForeignKey(Building, null=True,", "null=True, blank=True, default=\"Portland State University\") telephone = models.CharField(max_length=32, null=True, blank=True, validators=[validate_psu_phone]) fax =", "logic assumes that # all fields except psu_uuid are to be rendered and", "def __str__(self): return self.name # Buildings class Building(models.Model): name = models.CharField(max_length=255, unique=True) code", "null=True, blank=True, validators=[validate_psu_phone]) job_title = models.CharField(max_length=128, null=True, blank=True) department = models.ForeignKey(Department, null=True, blank=True)", "PhoneNumberField from MyInfo.validators import validate_psu_phone import logging logger = logging.getLogger(__name__) # PSU Mailcode", "models.CharField(max_length=128, null=True, blank=True) department = models.ForeignKey(Department, null=True, blank=True) office_building = models.ForeignKey(Building, null=True, blank=True)", "no cover return self.psu_uuid # Password reset contact information. class ContactInformation(models.Model): psu_uuid =", "django.db import models from localflavor.us.models import USStateField, PhoneNumberField from MyInfo.validators import validate_psu_phone import", "blank=True, default=\"97201\") def __str__(self): # pragma: no cover return self.psu_uuid # Password reset", "self.code + \" -- \" + self.description # For departmental dropdown choices. class", "null=True, blank=True) office_building = models.ForeignKey(Building, null=True, blank=True) office_room = models.CharField(max_length=10, null=True, blank=True) mail_code", "information for users with psuPublish = y. 
Upstream logic assumes that # all", "class ContactInformation(models.Model): psu_uuid = models.CharField(unique=True, max_length=36, primary_key=True) cell_phone = PhoneNumberField(blank=True, null=True) alternate_email =", "ContactInformation(models.Model): psu_uuid = models.CharField(unique=True, max_length=36, primary_key=True) cell_phone = PhoneNumberField(blank=True, null=True) alternate_email = models.EmailField(max_length=254,", "= models.EmailField(max_length=254, blank=True, null=True) def __str__(self): return self.psu_uuid # Maintenance notice. class MaintenanceNotice(models.Model):", "blank=True, default=\"1825 SW Broadway\") city = models.CharField(max_length=50, null=True, blank=True, default=\"Portland\") state = USStateField(blank=True,", "Directory information for users with psuPublish = y. Upstream logic assumes that #", "<reponame>hhauer/myinfo from django.db import models from localflavor.us.models import USStateField, PhoneNumberField from MyInfo.validators import", "Mailcode class Mailcode(models.Model): code = models.CharField(max_length=40) description = models.CharField(max_length=255) def __str__(self): return self.code", "models.CharField(max_length=255) def __str__(self): return self.code + \" -- \" + self.description # For", "blank=True) street_address = models.CharField(max_length=150, null=True, blank=True, default=\"1825 SW Broadway\") city = models.CharField(max_length=50, null=True,", "blank=True, default=\"Portland State University\") telephone = models.CharField(max_length=32, null=True, blank=True, validators=[validate_psu_phone]) fax = models.CharField(max_length=32,", "models.CharField(unique=True, max_length=36, primary_key=True) cell_phone = PhoneNumberField(blank=True, null=True) alternate_email = models.EmailField(max_length=254, blank=True, null=True) def", "# all fields except psu_uuid are to be rendered and editable. class DirectoryInformation(models.Model):", "blank=True) office_building = models.ForeignKey(Building, null=True, blank=True) office_room = models.CharField(max_length=10, null=True, blank=True) mail_code =", "primary_key=True, editable=False) company = models.CharField(max_length=50, choices=COMPANY_CHOICES, null=True, blank=True, default=\"Portland State University\") telephone =", "fields except psu_uuid are to be rendered and editable. class DirectoryInformation(models.Model): COMPANY_CHOICES =", "models.CharField(max_length=150, null=True, blank=True, default=\"1825 SW Broadway\") city = models.CharField(max_length=50, null=True, blank=True, default=\"Portland\") state", "all fields except psu_uuid are to be rendered and editable. class DirectoryInformation(models.Model): COMPANY_CHOICES", "# For departmental dropdown choices. class Department(models.Model): name = models.CharField(max_length=255, unique=True) def __str__(self):", "# Maintenance notice. class MaintenanceNotice(models.Model): start_display = models.DateTimeField() end_display = models.DateTimeField() message =", "SW Broadway\") city = models.CharField(max_length=50, null=True, blank=True, default=\"Portland\") state = USStateField(blank=True, null=True, default=\"OR\")", "self.psu_uuid # Maintenance notice. 
class MaintenanceNotice(models.Model): start_display = models.DateTimeField() end_display = models.DateTimeField() message", "State University\") telephone = models.CharField(max_length=32, null=True, blank=True, validators=[validate_psu_phone]) fax = models.CharField(max_length=32, null=True, blank=True,", "fax = models.CharField(max_length=32, null=True, blank=True, validators=[validate_psu_phone]) job_title = models.CharField(max_length=128, null=True, blank=True) department =", "reset contact information. class ContactInformation(models.Model): psu_uuid = models.CharField(unique=True, max_length=36, primary_key=True) cell_phone = PhoneNumberField(blank=True,", "State University Foundation', 'PSU Foundation'), ) psu_uuid = models.CharField(unique=True, max_length=36, primary_key=True, editable=False) company", "= models.CharField(max_length=255) def __str__(self): return self.code + \" -- \" + self.description #", "Building(models.Model): name = models.CharField(max_length=255, unique=True) code = models.CharField(max_length=10, unique=True) def __str__(self): return self.name", "# Buildings class Building(models.Model): name = models.CharField(max_length=255, unique=True) code = models.CharField(max_length=10, unique=True) def", "__str__(self): return self.name # Buildings class Building(models.Model): name = models.CharField(max_length=255, unique=True) code =", "rendered and editable. class DirectoryInformation(models.Model): COMPANY_CHOICES = ( ('Portland State University', 'Portland State", "import models from localflavor.us.models import USStateField, PhoneNumberField from MyInfo.validators import validate_psu_phone import logging", "= ( ('Portland State University', 'Portland State University'), ('Portland State University Foundation', 'PSU", "Maintenance notice. class MaintenanceNotice(models.Model): start_display = models.DateTimeField() end_display = models.DateTimeField() message = models.TextField()", "Upstream logic assumes that # all fields except psu_uuid are to be rendered", "class Building(models.Model): name = models.CharField(max_length=255, unique=True) code = models.CharField(max_length=10, unique=True) def __str__(self): return", "except psu_uuid are to be rendered and editable. class DirectoryInformation(models.Model): COMPANY_CHOICES = (", "localflavor.us.models import USStateField, PhoneNumberField from MyInfo.validators import validate_psu_phone import logging logger = logging.getLogger(__name__)", "null=True, blank=True, default=\"Portland\") state = USStateField(blank=True, null=True, default=\"OR\") zip_code = models.CharField(max_length=10, null=True, blank=True,", "models.CharField(max_length=10, null=True, blank=True, default=\"97201\") def __str__(self): # pragma: no cover return self.psu_uuid #", "= models.CharField(max_length=50, null=True, blank=True, default=\"Portland\") state = USStateField(blank=True, null=True, default=\"OR\") zip_code = models.CharField(max_length=10,", "MaintenanceNotice(models.Model): start_display = models.DateTimeField() end_display = models.DateTimeField() message = models.TextField() def __str__(self): #", "description = models.CharField(max_length=255) def __str__(self): return self.code + \" -- \" + self.description", "null=True, blank=True) mail_code = models.ForeignKey(Mailcode, null=True, blank=True) street_address = models.CharField(max_length=150, null=True, blank=True, default=\"1825", "information. 
class ContactInformation(models.Model): psu_uuid = models.CharField(unique=True, max_length=36, primary_key=True) cell_phone = PhoneNumberField(blank=True, null=True) alternate_email", "cell_phone = PhoneNumberField(blank=True, null=True) alternate_email = models.EmailField(max_length=254, blank=True, null=True) def __str__(self): return self.psu_uuid", "end_display = models.DateTimeField() message = models.TextField() def __str__(self): # pragma: no cover return", "editable=False) company = models.CharField(max_length=50, choices=COMPANY_CHOICES, null=True, blank=True, default=\"Portland State University\") telephone = models.CharField(max_length=32,", "from django.db import models from localflavor.us.models import USStateField, PhoneNumberField from MyInfo.validators import validate_psu_phone", "blank=True) office_room = models.CharField(max_length=10, null=True, blank=True) mail_code = models.ForeignKey(Mailcode, null=True, blank=True) street_address =", "default=\"1825 SW Broadway\") city = models.CharField(max_length=50, null=True, blank=True, default=\"Portland\") state = USStateField(blank=True, null=True,", "models.CharField(max_length=255, unique=True) def __str__(self): return self.name # Buildings class Building(models.Model): name = models.CharField(max_length=255,", "State University', 'Portland State University'), ('Portland State University Foundation', 'PSU Foundation'), ) psu_uuid", "with psuPublish = y. Upstream logic assumes that # all fields except psu_uuid", "blank=True, default=\"Portland\") state = USStateField(blank=True, null=True, default=\"OR\") zip_code = models.CharField(max_length=10, null=True, blank=True, default=\"97201\")", "contact information. class ContactInformation(models.Model): psu_uuid = models.CharField(unique=True, max_length=36, primary_key=True) cell_phone = PhoneNumberField(blank=True, null=True)", "validate_psu_phone import logging logger = logging.getLogger(__name__) # PSU Mailcode class Mailcode(models.Model): code =", "self.psu_uuid # Password reset contact information. 
from django.db import models
from localflavor.us.models import USStateField, PhoneNumberField
from MyInfo.validators import validate_psu_phone

import logging

logger = logging.getLogger(__name__)


# PSU Mailcode
class Mailcode(models.Model):
    code = models.CharField(max_length=40)
    description = models.CharField(max_length=255)

    def __str__(self):
        return self.code + " -- " + self.description


# For departmental dropdown choices.
class Department(models.Model):
    name = models.CharField(max_length=255, unique=True)

    def __str__(self):
        return self.name


# Buildings
class Building(models.Model):
    name = models.CharField(max_length=255, unique=True)
    code = models.CharField(max_length=10, unique=True)

    def __str__(self):
        return self.name


# Directory information for users with psuPublish = y. Upstream logic assumes that
# all fields except psu_uuid are to be rendered and editable.
class DirectoryInformation(models.Model):
    COMPANY_CHOICES = (
        ('Portland State University', 'Portland State University'),
        ('Portland State University Foundation', 'PSU Foundation'),
    )

    psu_uuid = models.CharField(unique=True, max_length=36, primary_key=True, editable=False)
    company = models.CharField(max_length=50, choices=COMPANY_CHOICES, null=True, blank=True,
                               default="Portland State University")
    telephone = models.CharField(max_length=32, null=True, blank=True, validators=[validate_psu_phone])
    fax = models.CharField(max_length=32, null=True, blank=True, validators=[validate_psu_phone])
    job_title = models.CharField(max_length=128, null=True, blank=True)
    department = models.ForeignKey(Department, null=True, blank=True)
    office_building = models.ForeignKey(Building, null=True, blank=True)
    office_room = models.CharField(max_length=10, null=True, blank=True)
    mail_code = models.ForeignKey(Mailcode, null=True, blank=True)
    street_address = models.CharField(max_length=150, null=True, blank=True, default="1825 SW Broadway")
    city = models.CharField(max_length=50, null=True, blank=True, default="Portland")
    state = USStateField(blank=True, null=True, default="OR")
    zip_code = models.CharField(max_length=10, null=True, blank=True, default="97201")

    def __str__(self):  # pragma: no cover
        return self.psu_uuid


# Password reset contact information.
class ContactInformation(models.Model):
    psu_uuid = models.CharField(unique=True, max_length=36, primary_key=True)
    cell_phone = PhoneNumberField(blank=True, null=True)
    alternate_email = models.EmailField(max_length=254, blank=True, null=True)

    def __str__(self):
        return self.psu_uuid


# Maintenance notice.
class MaintenanceNotice(models.Model):
    start_display = models.DateTimeField()
    end_display = models.DateTimeField()
    message = models.TextField()

    def __str__(self):  # pragma: no cover
        return "Maintenance starting: " + str(self.start_display)
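# The comment on DirectoryInformation above pins down a contract: upstream
# rendering treats every field except psu_uuid as editable. A minimal sketch of
# a form honoring that contract -- illustrative only, not part of the recovered
# file, and the form class name is invented:
from django import forms


class DirectoryInformationForm(forms.ModelForm):
    class Meta:
        model = DirectoryInformation
        exclude = ['psu_uuid']  # everything else is rendered and editable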
from django.contrib.gis.geos import GEOSGeometry
from django.db import models

import json
import datetime
import decimal


class JSONEncoder(json.JSONEncoder):
    """JSON encoder that also handles dates, decimals, geometries and models."""

    def default(self, obj):
        if isinstance(obj, datetime.date):
            return obj.isoformat()
        if isinstance(obj, decimal.Decimal):
            return str(obj)
        if isinstance(obj, GEOSGeometry):
            # Round-trip through the geometry's own GeoJSON representation.
            return json.JSONDecoder().decode(obj.geojson)
        if isinstance(obj, models.Model):
            return str(obj)
        return super(JSONEncoder, self).default(obj)


def geojsondata(features, geometry_field='geometry'):
    """Wrap an iterable of mapping-like features into a GeoJSON FeatureCollection."""
    data = {
        'type': 'FeatureCollection',
        'features': [],
    }
    for entry in features:
        feature = {
            'type': 'Feature',
            'geometry': entry.get(geometry_field, None),
            'properties': {},
        }
        for k, v in entry.items():  # was entry.iteritems(); .items() also works on Python 3
            if k != geometry_field:
                feature['properties'][k] = v
        data['features'].append(feature)
    return data


def geojson(*args, **kwargs):
    """Encode the collection produced by geojsondata() as a JSON string."""
    encoder = JSONEncoder()
    data = geojsondata(*args, **kwargs)
    return encoder.encode(data)
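# Taken together, geojsondata() wraps mapping-like rows into a FeatureCollection
# and geojson() serializes it. A hypothetical usage sketch; the feature dicts
# below are invented for illustration:
features = [
    {'geometry': {'type': 'Point', 'coordinates': [30.0, 10.0]}, 'name': 'site-1'},
    {'geometry': {'type': 'Point', 'coordinates': [31.5, 10.2]}, 'name': 'site-2'},
]
print(geojson(features))  # '{"type": "FeatureCollection", "features": [...]}'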
import cv2
import math

POINTS = []


class PointFilter:
    def __init__(self, points):
        self._points = points

    def deletePoints(self, event, xCoordinate, yCoordinate, flags, params):
        # On a right click, remove the stored point nearest to the click position.
        if event == cv2.EVENT_RBUTTONDOWN:
            diff = list()
            for point in self._points:
                xd = math.pow((point[0] - xCoordinate), 2)
                yd = math.pow((point[1] - yCoordinate), 2)
                d = math.sqrt(xd + yd)
                diff.append(d)
            pointToDelete = diff.index(min(diff))
            self._points.pop(pointToDelete)
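# deletePoints() has exactly the (event, x, y, flags, param) signature OpenCV
# expects of a mouse callback, so a bound method can be registered directly.
# Hypothetical wiring; the window name and sample points are invented:
pf = PointFilter([(10, 20), (100, 200)])
cv2.namedWindow("points")
cv2.setMouseCallback("points", pf.deletePoints)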
import numpy as np

from fluiddyn.clusters.legi import Calcul2 as Cluster
from critical_Ra_RB import Ra_c_RB as Ra_c_RB_tests

prandtl = 1.0
dim = 2

dt_max = 0.005
end_time = 30
nb_procs = 10

nx = 8
order = 10
stretch_factor = 0.0

Ra_vert = 1750

x_periodicity = False
z_periodicity = False

cluster = Cluster()

cluster.commands_setting_env = [
    "PROJET_DIR=/fsnet/project/meige/2020/20CONVECTION",
    "source /etc/profile",
    "source $PROJET_DIR/miniconda3/etc/profile.d/conda.sh",
    "conda activate env-snek",
    "export NEK_SOURCE_ROOT=$HOME/Dev/snek5000/lib/Nek5000",
    "export PATH=$PATH:$NEK_SOURCE_ROOT/bin",
    "export FLUIDSIM_PATH=$PROJET_DIR/numerical/",
]

for aspect_ratio, Ra_c_test in Ra_c_RB_tests.items():
    ny = int(nx * aspect_ratio)
    if nx * aspect_ratio - ny:
        continue

    Ra_vert_nums = np.logspace(np.log10(Ra_c_test), np.log10(1.04 * Ra_c_test), 4)

    for Ra_vert_num in Ra_vert_nums:
        command = (
            f"run_simul_check_from_python.py -Pr {prandtl} -nx {nx} --dim {dim} "
            f"--order {order} --dt-max {dt_max} --end-time {end_time} -np {nb_procs} "
            f"-a_y {aspect_ratio} --stretch-factor {stretch_factor} "
            f"--Ra-vert {Ra_vert_num}"
        )

        if x_periodicity:
            command += " --x-periodicity"
        elif z_periodicity:
            command += " --z-periodicity"

        print(command)

        name_run = f"RB_asp{aspect_ratio:.3f}_Ra{Ra_vert_num:.3e}_Pr{prandtl:.2f}_msh{nx*order}x{round(nx*aspect_ratio)*order}"
        cluster.submit_script(
            command,
            name_run=name_run,
            nb_cores_per_node=nb_procs,
            omp_num_threads=1,
            ask=False,
        )
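# The scan above skips aspect ratios for which nx * aspect_ratio is not an
# integer, then takes four Rayleigh numbers log-spaced between the tabulated
# critical value and 4% above it. A quick check of that spacing, with an
# illustrative critical value (the script reads real ones from critical_Ra_RB):
import numpy as np

Ra_c = 1750
print(np.logspace(np.log10(Ra_c), np.log10(1.04 * Ra_c), 4))
# -> approximately [1750.0, 1773.0, 1796.4, 1820.0]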
from typing import Dict, List, TypedDict


class Dependency(TypedDict):
    name: str
    labels: Dict[str, str]
    statuses: List[str]


class SetupConfig(TypedDict):
    pvc: str
    mountPath: str
    remotePath: str
    localPath: str


class Spec(TypedDict):
    dependencies: List[Dependency]
    ioConfig: Dict[str, str]
    engineConfig: Dict[str, str]
    tests: SetupConfig
    reports: SetupConfig


class Metadata(TypedDict):
    name: str
    namespace: str


class TestEngineBody(TypedDict):
    metadata: Metadata
    spec: Spec
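# At runtime these TypedDicts are ordinary dicts, so a request body can be
# written as a literal and still be checked statically. Every concrete value
# below is invented for illustration:
body: TestEngineBody = {
    "metadata": {"name": "smoke-test", "namespace": "default"},
    "spec": {
        "dependencies": [
            {"name": "postgres", "labels": {"app": "db"}, "statuses": ["Running"]},
        ],
        "ioConfig": {},
        "engineConfig": {},
        "tests": {"pvc": "tests-pvc", "mountPath": "/tests",
                  "remotePath": "/remote/tests", "localPath": "/local/tests"},
        "reports": {"pvc": "reports-pvc", "mountPath": "/reports",
                    "remotePath": "/remote/reports", "localPath": "/local/reports"},
    },
}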
[ "in sys.argv: keep = 1 for site_crit in SiteCrits: crit_name = site_crit['table_column'].split('.')[1] if", "pmag_env import set_env import operator OPS = {'<' : operator.lt, '<=' : operator.le,", "= [] for crit in crits: if crit[crit_col] == \"DE-SITE\": SiteCrits.append(crit) #break #", "pmag.fshdev(kappa) dipdir, dip = pmag.dodirot(d, i, PDs[k][2], PDs[k][3]) PDs[k][2] = dipdir PDs[k][3] =", "if keep == 1: DIDDs.append([Dec, Inc, dip_dir, dip]) else: num_dropped += 1 else:", "pyplot as plt import pmagpy.pmag as pmag import pmagpy.pmagplotlib as pmagplotlib from pmag_env", "1 for site_crit in SiteCrits: crit_name = site_crit['table_column'].split('.')[1] if crit_name and crit_name in", "plt.figure(num=PLTS['taus']) print('doing ', nboot, ' iterations...please be patient.....') for n in range(nboot): #", "'site_dec' inc_col = 'site_inc' tilt_col = 'site_tilt_correction' dipkey, azkey = 'sample_bed_dip', 'sample_bed_dip_direction' crit_col", "{} for key in list(PLTS.keys()): files[key] = ('foldtest_'+'%s' % (key.strip()[:2])+'.'+fmt) pmagplotlib.save_plots(PLTS, files) if", "= 'bed_dip', 'bed_dip_direction' crit_col = 'criterion' critfile = 'criteria.txt' else: infile = pmag.get_named_arg(\"-f\",", "table) if os.path.split(orfile)[1] == os.path.split(infile)[1]: ordata = df[df[azkey].notnull()] ordata = ordata[ordata[dipkey].notnull()] ordata =", "a pre-tilt magnetization is indicated If the 95% conf bounds include 100, then", "2.5, pmag_sites.txt] -fsa samples formatted file -fsi sites formatted file -exc use criteria", "'-b' in sys.argv: ind = sys.argv.index('-b') untilt_min = int(sys.argv[ind+1]) untilt_max = int(sys.argv[ind+2]) else:", "turn into pmag data list data = list(data.T.apply(dict)) # get orientation data if", "site_col = 'er_site_name' dec_col = 'site_dec' inc_col = 'site_inc' tilt_col = 'site_tilt_correction' dipkey,", "'site_inc' tilt_col = 'site_tilt_correction' dipkey, azkey = 'sample_bed_dip', 'sample_bed_dip_direction' crit_col = 'pmag_criteria_code' critfile", "int(.025*nboot) upper = int(.975*nboot) plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--') plt.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1,", "dipkey, azkey = 'sample_bed_dip', 'sample_bed_dip_direction' crit_col = 'pmag_criteria_code' critfile = 'pmag_criteria.txt' if '-sav'", "(red), CDF (green)') Untilt.sort() # now for CDF of tilt of maximum tau", "line options] OPTIONS -h prints help message and quits -f sites formatted file", "# sometimes orientation might be in a sample file instead else: ordata =", "'dir_dec' inc_col = 'dir_inc' tilt_col = 'dir_tilt_correction' dipkey, azkey = 'bed_dip', 'bed_dip_direction' crit_col", "the % untilting that yields the most clustered result (maximum tau_1). 
Command line:", "= {} for key in list(PLTS.keys()): files[key] = ('foldtest_'+'%s' % (key.strip()[:2])+'.'+fmt) pmagplotlib.save_plots(PLTS, files)", "data found') sys.exit() pmagplotlib.plot_eq(PLTS['geo'], DIDDs, 'Geographic') data = np.array(DIDDs) D, I = pmag.dotilt_V(data)", "NB, set number of bootstraps, default is 1000 -b MIN, MAX, set bounds", "'dir_tilt_correction' dipkey, azkey = 'bed_dip', 'bed_dip_direction' crit_col = 'criterion' critfile = 'criteria.txt' else:", "pmag.get_dictitem(data, tilt_col, '0', 'T') else: GEOrecs = data if len(GEOrecs) > 0: #", "projection of the input data in original coordinates Stratigraphic: is an equal area", "print(main.__doc__) sys.exit() # graceful quit kappa = 0 dir_path = pmag.get_named_arg(\"-WD\", \".\") nboot", "azkey = 'bed_dip', 'bed_dip_direction' crit_col = 'criterion' critfile = 'criteria.txt' else: infile =", "Dropped {} records because each failed one or more criteria\".format(num_dropped)) else: print('no geographic", "95% conf bounds exclude both 0 and 100, syn-tilt magnetization is possible as", "numpy as np import matplotlib if matplotlib.get_backend() != \"TKAgg\": matplotlib.use(\"TKAgg\") import pandas as", "import pmagpy.pmag as pmag import pmagpy.pmagplotlib as pmagplotlib from pmag_env import set_env import", "np import matplotlib if matplotlib.get_backend() != \"TKAgg\": matplotlib.use(\"TKAgg\") import pandas as pd from", "= 'dir_inc' tilt_col = 'dir_tilt_correction' dipkey, azkey = 'bed_dip', 'bed_dip_direction' crit_col = 'criterion'", "set number of bootstraps, default is 1000 -b MIN, MAX, set bounds for", "linewidth=1, linestyle='--') tit = '%i - %i %s' % (Untilt[lower], Untilt[upper], 'Percent Unfolding')", "= pmag.resolve_file_name(orfile, dir_path) infile = pmag.resolve_file_name(infile, dir_path) critfile = pmag.resolve_file_name(critfile, dir_path) df =", "<Return> to quit \\n ') if ans != 'a': print(\"Good bye\") sys.exit() files", "1, 'strat': 2, 'taus': 3} # make plot dictionary if not set_env.IS_WIN: pmagplotlib.plot_init(PLTS['geo'],", "= 0 if '-b' in sys.argv: ind = sys.argv.index('-b') untilt_min = int(sys.argv[ind+1]) untilt_max", "site_crit['table_column'].split('.')[1] if crit_name and crit_name in rec.keys() and rec[crit_name]: # get the correct", "0: for k in range(len(PDs)): d, i = pmag.fshdev(kappa) dipdir, dip = pmag.dodirot(d,", "check if help is needed print(main.__doc__) sys.exit() # graceful quit kappa = 0", "parse data dip, dip_dir = 0, -1 Dec = float(rec[dec_col]) Inc = float(rec[inc_col])", "list(ordata.T.apply(dict)) # sometimes orientation might be in a sample file instead else: ordata", "a function of untilting The solid line is the cumulative distribution of the", "for k in range(len(PDs)): d, i = pmag.fshdev(kappa) dipdir, dip = pmag.dodirot(d, i,", "optimum untilting. 
If the 95% conf bounds include 0, then a pre-tilt magnetization", "== 2: GEOrecs = pmag.get_dictitem(data, tilt_col, '0', 'T') else: GEOrecs = data if", "# then make sure the site record passes if op(float(rec[crit_name]), float(site_crit['criterion_value'])): keep =", "file -fsi sites formatted file -exc use criteria to set acceptance criteria (supported", "GEOrecs: # parse data dip, dip_dir = 0, -1 Dec = float(rec[dec_col]) Inc", "for taus PDs = pmag.pseudo(DIDDs) if kappa != 0: for k in range(len(PDs)):", "formatted file -exc use criteria to set acceptance criteria (supported only for data", "orfile = 'er_samples.txt' site_col = 'er_site_name' dec_col = 'site_dec' inc_col = 'site_inc' tilt_col", "clustered result (maximum tau_1). Command line: prints out the bootstrapped iterations and finally", "dir_path) critfile = pmag.resolve_file_name(critfile, dir_path) df = pd.read_csv(infile, sep='\\t', header=1) # keep only", "maximum tau plt.plot(Untilt, Cdf, 'g') lower = int(.025*nboot) upper = int(.975*nboot) plt.axvline(x=Untilt[lower], ymin=0,", "= df.copy() data = data[data[tilt_col].notnull()] data = data.where(data.notnull(), \"\") # turn into pmag", "SiteCrits.append(crit) #break # get to work # PLTS = {'geo': 1, 'strat': 2,", "PLTS = {'geo': 1, 'strat': 2, 'taus': 3} # make plot dictionary if", "0 and dip_dir != -1: if '-exc' in sys.argv: keep = 1 for", "plot dictionary if not set_env.IS_WIN: pmagplotlib.plot_init(PLTS['geo'], 5, 5) pmagplotlib.plot_init(PLTS['strat'], 5, 5) pmagplotlib.plot_init(PLTS['taus'], 5,", "linestyle='--') plt.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--') tit = '%i - %i %s' %", "sites.txt, for 2.5, pmag_sites.txt] -fsa samples formatted file -fsi sites formatted file -exc", "projection of the input data in tilt adjusted coordinates % Untilting: The dashed", "dashed vertical lines are 95% confidence bounds on the % untilting that yields", "crits, file_type = pmag.magic_read(critfile) SiteCrits = [] for crit in crits: if crit[crit_col]", "> 0: # have some geographic data num_dropped = 0 DIDDs = []", "Untilt.sort() # now for CDF of tilt of maximum tau plt.plot(Untilt, Cdf, 'g')", "untilt_max = int(sys.argv[ind+2]) else: untilt_min, untilt_max = -10, 150 if '-fsa' in sys.argv:", "data[data[tilt_col].notnull()] data = data.where(data.notnull(), \"\") # turn into pmag data list data =", "= data.where(data.notnull(), \"\") # turn into pmag data list data = list(data.T.apply(dict)) #", "kappa != 0: for k in range(len(PDs)): d, i = pmag.fshdev(kappa) dipdir, dip", "bootstrapped iterations and finally the confidence bounds on optimum untilting. 
If the 95%", "default is svg -sav saves plots and quits -DM NUM MagIC data model", "{} records because each failed one or more criteria\".format(num_dropped)) else: print('no geographic directional", "untilting, default is -10, 150 -fmt FMT, specify format - default is svg", "plot = 1 else: plot = 0 if '-b' in sys.argv: ind =", "of untilting The solid line is the cumulative distribution of the % Untilting", "SiteCrits = [] for crit in crits: if crit[crit_col] == \"DE-SITE\": SiteCrits.append(crit) #break", "SYNTAX foldtest_magic.py [command line options] OPTIONS -h prints help message and quits -f", "0: pmagplotlib.draw_figs(PLTS) ans = input('S[a]ve all figures, <Return> to quit \\n ') if", "if plot == 0: pmagplotlib.draw_figs(PLTS) Percs = list(range(untilt_min, untilt_max)) Cdf, Untilt = [],", "infile = pmag.get_named_arg(\"-f\", 'sites.txt') orfile = 'samples.txt' site_col = 'site' dec_col = 'dir_dec'", "dec_col = 'site_dec' inc_col = 'site_inc' tilt_col = 'site_tilt_correction' dipkey, azkey = 'sample_bed_dip',", "and quits -DM NUM MagIC data model number (2 or 3, default 3)", "gives maximum tau Untilt.append(Percs[Taus.index(np.max(Taus))]) Cdf.append(float(n) / float(nboot)) plt.plot(Percs, Taus, 'k') plt.xlabel('% Untilting') plt.ylabel('tau_1", "as pmagplotlib from pmag_env import set_env import operator OPS = {'<' : operator.lt,", "pd.read_csv(orfile, sep='\\t', header=1) ordata = list(ordata.T.apply(dict)) else: ordata, file_type = pmag.magic_read(orfile) if '-exc'", "dip_dir = float(orecs[0][azkey]) if orecs[0][dipkey] != \"\": dip = float(orecs[0][dipkey]) if dip !=", "!= \"\": dip = float(orecs[0][dipkey]) if dip != 0 and dip_dir != -1:", "sample file instead else: ordata = pd.read_csv(orfile, sep='\\t', header=1) ordata = list(ordata.T.apply(dict)) else:", "op = OPS[site_crit['criterion_operation']] # then make sure the site record passes if op(float(rec[crit_name]),", "if '-fsa' in sys.argv: orfile = pmag.get_named_arg(\"-fsa\", \"\") elif '-fsi' in sys.argv: orfile", "data in original coordinates Stratigraphic: is an equal area projection of the input", "critfile = pmag.resolve_file_name(critfile, dir_path) df = pd.read_csv(infile, sep='\\t', header=1) # keep only records", "# PLTS = {'geo': 1, 'strat': 2, 'taus': 3} # make plot dictionary", "that gives maximum tau Untilt.append(Percs[Taus.index(np.max(Taus))]) Cdf.append(float(n) / float(nboot)) plt.plot(Percs, Taus, 'k') plt.xlabel('% Untilting')", "5, 5) if data_model_num == 2: GEOrecs = pmag.get_dictitem(data, tilt_col, '0', 'T') else:", "else: infile = pmag.get_named_arg(\"-f\", 'pmag_sites.txt') orfile = 'er_samples.txt' site_col = 'er_site_name' dec_col =", "= 1 else: plot = 0 if '-b' in sys.argv: ind = sys.argv.index('-b')", "untilting The solid line is the cumulative distribution of the % Untilting required", "do bootstrap data sets - plot first 25 as dashed red line if", "-DM NUM MagIC data model number (2 or 3, default 3) OUTPUT Geographic:", "\"\") # turn into pmag data list data = list(data.T.apply(dict)) # get orientation", "up lists for taus PDs = pmag.pseudo(DIDDs) if kappa != 0: for k", "dipdir, dip = pmag.dodirot(d, i, PDs[k][2], PDs[k][3]) PDs[k][2] = dipdir PDs[k][3] = dip", "pmag.doprinc(TCs) # get principal directions Taus.append(ppars['tau1']) if n < 25: plt.plot(Percs, Taus, 'r--')", "dip]) if num_dropped: print(\"-W- Dropped {} records because each failed one or more", ": operator.gt, '>=': operator.ge, '=': operator.eq} def main(): \"\"\" NAME foldtest_magic.py DESCRIPTION does", "on optimum 
untilting. If the 95% conf bounds include 0, then a pre-tilt", "untilting that yields the most clustered result (maximum tau_1). Command line: prints out", "infile (sites table) if os.path.split(orfile)[1] == os.path.split(infile)[1]: ordata = df[df[azkey].notnull()] ordata = ordata[ordata[dipkey].notnull()]", "= int(sys.argv[ind+2]) else: untilt_min, untilt_max = -10, 150 if '-fsa' in sys.argv: orfile", "matplotlib.get_backend() != \"TKAgg\": matplotlib.use(\"TKAgg\") import pandas as pd from matplotlib import pyplot as", "data = data.where(data.notnull(), \"\") # turn into pmag data list data = list(data.T.apply(dict))", "', nboot, ' iterations...please be patient.....') for n in range(nboot): # do bootstrap", "if n < 25: plt.plot(Percs, Taus, 'r--') # tilt that gives maximum tau", "operator.ge, '=': operator.eq} def main(): \"\"\" NAME foldtest_magic.py DESCRIPTION does a fold test", "= 'sites.txt' else: orfile = 'pmag_sites.txt' orfile = pmag.resolve_file_name(orfile, dir_path) infile = pmag.resolve_file_name(infile,", "Cdf, Untilt = [], [] plt.figure(num=PLTS['taus']) print('doing ', nboot, ' iterations...please be patient.....')", "input('S[a]ve all figures, <Return> to quit \\n ') if ans != 'a': print(\"Good", "= pmag.get_named_arg(\"-f\", 'sites.txt') orfile = 'samples.txt' site_col = 'site' dec_col = 'dir_dec' inc_col", "print(\"-W- Dropped {} records because each failed one or more criteria\".format(num_dropped)) else: print('no", "dip_dir, dip]) if num_dropped: print(\"-W- Dropped {} records because each failed one or", "model number (2 or 3, default 3) OUTPUT Geographic: is an equal area", "of bootstraps fmt = pmag.get_named_arg(\"-fmt\", \"svg\") data_model_num = int(float(pmag.get_named_arg(\"-DM\", 3))) if data_model_num ==", "header=1) # keep only records with tilt_col data = df.copy() data = data[data[tilt_col].notnull()]", "= [] # set up lists for taus PDs = pmag.pseudo(DIDDs) if kappa", "get principal directions Taus.append(ppars['tau1']) if n < 25: plt.plot(Percs, Taus, 'r--') # tilt", "bounds exclude both 0 and 100, syn-tilt magnetization is possible as is vertical", "\".\") nboot = int(float(pmag.get_named_arg(\"-n\", 1000))) # number of bootstraps fmt = pmag.get_named_arg(\"-fmt\", \"svg\")", "3, default 3) OUTPUT Geographic: is an equal area projection of the input", "pmag_sites.txt] -fsa samples formatted file -fsi sites formatted file -exc use criteria to", "#!/usr/bin/env python import os import sys import numpy as np import matplotlib if", "is svg -sav saves plots and quits -DM NUM MagIC data model number", "Geographic: is an equal area projection of the input data in original coordinates", "in range(len(PDs)): d, i = pmag.fshdev(kappa) dipdir, dip = pmag.dodirot(d, i, PDs[k][2], PDs[k][3])", "sys.argv: ind = sys.argv.index('-b') untilt_min = int(sys.argv[ind+1]) untilt_max = int(sys.argv[ind+2]) else: untilt_min, untilt_max", "of bootstraps, default is 1000 -b MIN, MAX, set bounds for untilting, default", "yields the most clustered result (maximum tau_1). 
Command line: prints out the bootstrapped", "%i %s' % (Untilt[lower], Untilt[upper], 'Percent Unfolding') print(tit) plt.title(tit) if plot == 0:", "file_type = pmag.magic_read(orfile) if '-exc' in sys.argv: crits, file_type = pmag.magic_read(critfile) SiteCrits =", "= list(ordata.T.apply(dict)) else: ordata, file_type = pmag.magic_read(orfile) if '-exc' in sys.argv: crits, file_type", "= [] # set up list for dec inc dip_direction, dip for rec", "geographic data num_dropped = 0 DIDDs = [] # set up list for", "conf bounds exclude both 0 and 100, syn-tilt magnetization is possible as is", "number of bootstraps fmt = pmag.get_named_arg(\"-fmt\", \"svg\") data_model_num = int(float(pmag.get_named_arg(\"-DM\", 3))) if data_model_num", "-fmt FMT, specify format - default is svg -sav saves plots and quits", "1., 1., 0.01*perc]) D, I = pmag.dotilt_V(PDs*tilt) TCs = np.array([D, I]).transpose() ppars =", "an equal area projection of the input data in tilt adjusted coordinates %", "untilting. If the 95% conf bounds include 0, then a pre-tilt magnetization is", "taus PDs = pmag.pseudo(DIDDs) if kappa != 0: for k in range(len(PDs)): d,", "tau Untilt.append(Percs[Taus.index(np.max(Taus))]) Cdf.append(float(n) / float(nboot)) plt.plot(Percs, Taus, 'k') plt.xlabel('% Untilting') plt.ylabel('tau_1 (red), CDF", "100, syn-tilt magnetization is possible as is vertical axis rotation or other pathologies", "plot == 0: pmagplotlib.draw_figs(PLTS) Percs = list(range(untilt_min, untilt_max)) Cdf, Untilt = [], []", "and finally the confidence bounds on optimum untilting. If the 95% conf bounds", "!= \"TKAgg\": matplotlib.use(\"TKAgg\") import pandas as pd from matplotlib import pyplot as plt", "kappa = 0 dir_path = pmag.get_named_arg(\"-WD\", \".\") nboot = int(float(pmag.get_named_arg(\"-n\", 1000))) # number", "a sample file instead else: ordata = pd.read_csv(orfile, sep='\\t', header=1) ordata = list(ordata.T.apply(dict))", "plt.plot(Percs, Taus, 'k') plt.xlabel('% Untilting') plt.ylabel('tau_1 (red), CDF (green)') Untilt.sort() # now for", "The dashed vertical lines are 95% confidence bounds on the % untilting that", "of the % Untilting required to maximize tau for all the bootstrapped data", "to quit \\n ') if ans != 'a': print(\"Good bye\") sys.exit() files =", "figures, <Return> to quit \\n ') if ans != 'a': print(\"Good bye\") sys.exit()", "[] plt.figure(num=PLTS['taus']) print('doing ', nboot, ' iterations...please be patient.....') for n in range(nboot):", "Dec = float(rec[dec_col]) Inc = float(rec[inc_col]) orecs = pmag.get_dictitem( ordata, site_col, rec[site_col], 'T')", "DIDDs.append([Dec, Inc, dip_dir, dip]) else: num_dropped += 1 else: DIDDs.append([Dec, Inc, dip_dir, dip])", "for crit in crits: if crit[crit_col] == \"DE-SITE\": SiteCrits.append(crit) #break # get to", "input data in original coordinates Stratigraphic: is an equal area projection of the", "pre-tilt magnetization is indicated If the 95% conf bounds include 100, then a", "INPUT FORMAT pmag_specimens format file, er_samples.txt format file (for bedding) SYNTAX foldtest_magic.py [command", "= pmag.dodirot(d, i, PDs[k][2], PDs[k][3]) PDs[k][2] = dipdir PDs[k][3] = dip for perc", "Taus.append(ppars['tau1']) if n < 25: plt.plot(Percs, Taus, 'r--') # tilt that gives maximum", "header=1) ordata = list(ordata.T.apply(dict)) else: ordata, file_type = pmag.magic_read(orfile) if '-exc' in sys.argv:", "pmag data list data = list(data.T.apply(dict)) # get orientation data if data_model_num ==", "in crits: if crit[crit_col] == \"DE-SITE\": 
SiteCrits.append(crit) #break # get to work #", "not set_env.IS_WIN: pmagplotlib.plot_init(PLTS['geo'], 5, 5) pmagplotlib.plot_init(PLTS['strat'], 5, 5) pmagplotlib.plot_init(PLTS['taus'], 5, 5) if data_model_num", "quit kappa = 0 dir_path = pmag.get_named_arg(\"-WD\", \".\") nboot = int(float(pmag.get_named_arg(\"-n\", 1000))) #", "failed one or more criteria\".format(num_dropped)) else: print('no geographic directional data found') sys.exit() pmagplotlib.plot_eq(PLTS['geo'],", "if kappa != 0: for k in range(len(PDs)): d, i = pmag.fshdev(kappa) dipdir,", "dipdir PDs[k][3] = dip for perc in Percs: tilt = np.array([1., 1., 1.,", "linewidth=1, linestyle='--') plt.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--') tit = '%i - %i %s'", "bounds include 0, then a pre-tilt magnetization is indicated If the 95% conf", "ymax=1, linewidth=1, linestyle='--') plt.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--') tit = '%i - %i", "DIDDs.append([Dec, Inc, dip_dir, dip]) if num_dropped: print(\"-W- Dropped {} records because each failed", "np.array([D, I]).transpose() ppars = pmag.doprinc(TCs) # get principal directions Taus.append(ppars['tau1']) if n <", "else: GEOrecs = data if len(GEOrecs) > 0: # have some geographic data", "of the input data in tilt adjusted coordinates % Untilting: The dashed (red)", "for 3.0 is sites.txt, for 2.5, pmag_sites.txt] -fsa samples formatted file -fsi sites", "get to work # PLTS = {'geo': 1, 'strat': 2, 'taus': 3} #", "get the correct operation (<, >=, =, etc.) op = OPS[site_crit['criterion_operation']] # then", "'criteria.txt' else: infile = pmag.get_named_arg(\"-f\", 'pmag_sites.txt') orfile = 'er_samples.txt' site_col = 'er_site_name' dec_col", "are 95% confidence bounds on the % untilting that yields the most clustered", "# make plot dictionary if not set_env.IS_WIN: pmagplotlib.plot_init(PLTS['geo'], 5, 5) pmagplotlib.plot_init(PLTS['strat'], 5, 5)", "= 'dir_dec' inc_col = 'dir_inc' tilt_col = 'dir_tilt_correction' dipkey, azkey = 'bed_dip', 'bed_dip_direction'", "the % Untilting required to maximize tau for all the bootstrapped data sets.", "= OPS[site_crit['criterion_operation']] # then make sure the site record passes if op(float(rec[crit_name]), float(site_crit['criterion_value'])):", "pd from matplotlib import pyplot as plt import pmagpy.pmag as pmag import pmagpy.pmagplotlib", "data model number (2 or 3, default 3) OUTPUT Geographic: is an equal", "orfile = 'pmag_sites.txt' orfile = pmag.resolve_file_name(orfile, dir_path) infile = pmag.resolve_file_name(infile, dir_path) critfile =", "be in a sample file instead else: ordata = pd.read_csv(orfile, sep='\\t', header=1) ordata", "the 95% conf bounds exclude both 0 and 100, syn-tilt magnetization is possible", "plt import pmagpy.pmag as pmag import pmagpy.pmagplotlib as pmagplotlib from pmag_env import set_env", "operation (<, >=, =, etc.) 
op = OPS[site_crit['criterion_operation']] # then make sure the", "if crit_name and crit_name in rec.keys() and rec[crit_name]: # get the correct operation", "[], [] plt.figure(num=PLTS['taus']) print('doing ', nboot, ' iterations...please be patient.....') for n in", "site_col, rec[site_col], 'T') if len(orecs) > 0: if orecs[0][azkey] != \"\": dip_dir =", "distribution of the % Untilting required to maximize tau for all the bootstrapped", "= pmag.get_named_arg(\"-fsi\", \"\") if data_model_num == 3: dipkey, azkey = 'bed_dip', 'bed_dip_direction' else:", "representative plots of maximum eigenvalue (tau_1) as a function of untilting The solid", "pmag.get_named_arg(\"-fmt\", \"svg\") data_model_num = int(float(pmag.get_named_arg(\"-DM\", 3))) if data_model_num == 3: infile = pmag.get_named_arg(\"-f\",", "DIDDs, 'Geographic') data = np.array(DIDDs) D, I = pmag.dotilt_V(data) TCs = np.array([D, I]).transpose()", "5, 5) pmagplotlib.plot_init(PLTS['taus'], 5, 5) if data_model_num == 2: GEOrecs = pmag.get_dictitem(data, tilt_col,", "format file, er_samples.txt format file (for bedding) SYNTAX foldtest_magic.py [command line options] OPTIONS", "dip]) else: num_dropped += 1 else: DIDDs.append([Dec, Inc, dip_dir, dip]) if num_dropped: print(\"-W-", "= pmag.doprinc(TCs) # get principal directions Taus.append(ppars['tau1']) if n < 25: plt.plot(Percs, Taus,", "(supported only for data model 3) -n NB, set number of bootstraps, default", "magnetization is possible as is vertical axis rotation or other pathologies \"\"\" if", "== 1: DIDDs.append([Dec, Inc, dip_dir, dip]) else: num_dropped += 1 else: DIDDs.append([Dec, Inc,", "required to maximize tau for all the bootstrapped data sets. The dashed vertical", "= 0, -1 Dec = float(rec[dec_col]) Inc = float(rec[inc_col]) orecs = pmag.get_dictitem( ordata,", "== 0: print(n) Taus = [] # set up lists for taus PDs", "I = pmag.dotilt_V(PDs*tilt) TCs = np.array([D, I]).transpose() ppars = pmag.doprinc(TCs) # get principal", "= np.array(DIDDs) D, I = pmag.dotilt_V(data) TCs = np.array([D, I]).transpose() pmagplotlib.plot_eq(PLTS['strat'], TCs, 'Stratigraphic')", "'Stratigraphic') if plot == 0: pmagplotlib.draw_figs(PLTS) Percs = list(range(untilt_min, untilt_max)) Cdf, Untilt =", "function of untilting The solid line is the cumulative distribution of the %", "in infile (sites table) if os.path.split(orfile)[1] == os.path.split(infile)[1]: ordata = df[df[azkey].notnull()] ordata =", "'pmag_sites.txt') orfile = 'er_samples.txt' site_col = 'er_site_name' dec_col = 'site_dec' inc_col = 'site_inc'", "Taus = [] # set up lists for taus PDs = pmag.pseudo(DIDDs) if", "i = pmag.fshdev(kappa) dipdir, dip = pmag.dodirot(d, i, PDs[k][2], PDs[k][3]) PDs[k][2] = dipdir", "most clustered result (maximum tau_1). 
Command line: prints out the bootstrapped iterations and", "0: # have some geographic data num_dropped = 0 DIDDs = [] #", "The solid line is the cumulative distribution of the % Untilting required to", "is -10, 150 -fmt FMT, specify format - default is svg -sav saves", "get orientation data if data_model_num == 3: # often orientation will be in", "a post-tilt magnetization is indicated If the 95% conf bounds exclude both 0", "ordata = list(ordata.T.apply(dict)) # sometimes orientation might be in a sample file instead", "pmag.dodirot(d, i, PDs[k][2], PDs[k][3]) PDs[k][2] = dipdir PDs[k][3] = dip for perc in", "coordinates Stratigraphic: is an equal area projection of the input data in tilt", "pd.read_csv(infile, sep='\\t', header=1) # keep only records with tilt_col data = df.copy() data", "% Untilting: The dashed (red) curves are representative plots of maximum eigenvalue (tau_1)", "adjusted coordinates % Untilting: The dashed (red) curves are representative plots of maximum", "import numpy as np import matplotlib if matplotlib.get_backend() != \"TKAgg\": matplotlib.use(\"TKAgg\") import pandas", "-1 Dec = float(rec[dec_col]) Inc = float(rec[inc_col]) orecs = pmag.get_dictitem( ordata, site_col, rec[site_col],", "pmag.magic_read(critfile) SiteCrits = [] for crit in crits: if crit[crit_col] == \"DE-SITE\": SiteCrits.append(crit)", "float(rec[dec_col]) Inc = float(rec[inc_col]) orecs = pmag.get_dictitem( ordata, site_col, rec[site_col], 'T') if len(orecs)", "dip_dir = 0, -1 Dec = float(rec[dec_col]) Inc = float(rec[inc_col]) orecs = pmag.get_dictitem(", "pmag.dotilt_V(data) TCs = np.array([D, I]).transpose() pmagplotlib.plot_eq(PLTS['strat'], TCs, 'Stratigraphic') if plot == 0: pmagplotlib.draw_figs(PLTS)", "untilt_min, untilt_max = -10, 150 if '-fsa' in sys.argv: orfile = pmag.get_named_arg(\"-fsa\", \"\")", "ordata = ordata[ordata[dipkey].notnull()] ordata = list(ordata.T.apply(dict)) # sometimes orientation might be in a", "= 0 DIDDs = [] # set up list for dec inc dip_direction,", "-exc use criteria to set acceptance criteria (supported only for data model 3)", "float(rec[inc_col]) orecs = pmag.get_dictitem( ordata, site_col, rec[site_col], 'T') if len(orecs) > 0: if", "if orecs[0][dipkey] != \"\": dip = float(orecs[0][dipkey]) if dip != 0 and dip_dir", "data sets - plot first 25 as dashed red line if n %", "dashed (red) curves are representative plots of maximum eigenvalue (tau_1) as a function", "'r--') # tilt that gives maximum tau Untilt.append(Percs[Taus.index(np.max(Taus))]) Cdf.append(float(n) / float(nboot)) plt.plot(Percs, Taus,", "df = pd.read_csv(infile, sep='\\t', header=1) # keep only records with tilt_col data =", "prints out the bootstrapped iterations and finally the confidence bounds on optimum untilting.", "= pmag.get_dictitem( ordata, site_col, rec[site_col], 'T') if len(orecs) > 0: if orecs[0][azkey] !=", "graceful quit kappa = 0 dir_path = pmag.get_named_arg(\"-WD\", \".\") nboot = int(float(pmag.get_named_arg(\"-n\", 1000)))", "for untilting, default is -10, 150 -fmt FMT, specify format - default is", "might be in a sample file instead else: ordata = pd.read_csv(orfile, sep='\\t', header=1)", "directional data found') sys.exit() pmagplotlib.plot_eq(PLTS['geo'], DIDDs, 'Geographic') data = np.array(DIDDs) D, I =", "150 if '-fsa' in sys.argv: orfile = pmag.get_named_arg(\"-fsa\", \"\") elif '-fsi' in sys.argv:", "# get to work # PLTS = {'geo': 1, 'strat': 2, 'taus': 3}", "include 100, then a post-tilt magnetization is indicated If the 95% conf bounds", 
"int(sys.argv[ind+1]) untilt_max = int(sys.argv[ind+2]) else: untilt_min, untilt_max = -10, 150 if '-fsa' in", "= pmag.dotilt_V(PDs*tilt) TCs = np.array([D, I]).transpose() ppars = pmag.doprinc(TCs) # get principal directions", "'-h' in sys.argv: # check if help is needed print(main.__doc__) sys.exit() # graceful", "sys.argv: orfile = pmag.get_named_arg(\"-fsi\", \"\") if data_model_num == 3: dipkey, azkey = 'bed_dip',", "else: DIDDs.append([Dec, Inc, dip_dir, dip]) if num_dropped: print(\"-W- Dropped {} records because each", "= 'pmag_criteria_code' critfile = 'pmag_criteria.txt' if '-sav' in sys.argv: plot = 1 else:", "== 3: orfile = 'sites.txt' else: orfile = 'pmag_sites.txt' orfile = pmag.resolve_file_name(orfile, dir_path)", "pmag.get_named_arg(\"-f\", 'sites.txt') orfile = 'samples.txt' site_col = 'site' dec_col = 'dir_dec' inc_col =", "= pmag.resolve_file_name(infile, dir_path) critfile = pmag.resolve_file_name(critfile, dir_path) df = pd.read_csv(infile, sep='\\t', header=1) #", "geographic directional data found') sys.exit() pmagplotlib.plot_eq(PLTS['geo'], DIDDs, 'Geographic') data = np.array(DIDDs) D, I", "sys.argv.index('-b') untilt_min = int(sys.argv[ind+1]) untilt_max = int(sys.argv[ind+2]) else: untilt_min, untilt_max = -10, 150", "= np.array([D, I]).transpose() pmagplotlib.plot_eq(PLTS['strat'], TCs, 'Stratigraphic') if plot == 0: pmagplotlib.draw_figs(PLTS) Percs =", "= int(.025*nboot) upper = int(.975*nboot) plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--') plt.axvline(x=Untilt[upper], ymin=0, ymax=1,", "# keep only records with tilt_col data = df.copy() data = data[data[tilt_col].notnull()] data", "file (for bedding) SYNTAX foldtest_magic.py [command line options] OPTIONS -h prints help message", "pmagplotlib.plot_init(PLTS['strat'], 5, 5) pmagplotlib.plot_init(PLTS['taus'], 5, 5) if data_model_num == 2: GEOrecs = pmag.get_dictitem(data,", "default is 1000 -b MIN, MAX, set bounds for untilting, default is -10,", "or other pathologies \"\"\" if '-h' in sys.argv: # check if help is", "in SiteCrits: crit_name = site_crit['table_column'].split('.')[1] if crit_name and crit_name in rec.keys() and rec[crit_name]:", "dipkey, azkey = 'bed_dip', 'bed_dip_direction' crit_col = 'criterion' critfile = 'criteria.txt' else: infile", "sites formatted file -exc use criteria to set acceptance criteria (supported only for", "data_model_num == 3: infile = pmag.get_named_arg(\"-f\", 'sites.txt') orfile = 'samples.txt' site_col = 'site'", "line is the cumulative distribution of the % Untilting required to maximize tau", "orfile = 'samples.txt' site_col = 'site' dec_col = 'dir_dec' inc_col = 'dir_inc' tilt_col", "then a pre-tilt magnetization is indicated If the 95% conf bounds include 100,", "magnetization is indicated If the 95% conf bounds exclude both 0 and 100,", "criteria (supported only for data model 3) -n NB, set number of bootstraps,", "d, i = pmag.fshdev(kappa) dipdir, dip = pmag.dodirot(d, i, PDs[k][2], PDs[k][3]) PDs[k][2] =", "tilt = np.array([1., 1., 1., 0.01*perc]) D, I = pmag.dotilt_V(PDs*tilt) TCs = np.array([D,", "'strat': 2, 'taus': 3} # make plot dictionary if not set_env.IS_WIN: pmagplotlib.plot_init(PLTS['geo'], 5,", "plt.title(tit) if plot == 0: pmagplotlib.draw_figs(PLTS) ans = input('S[a]ve all figures, <Return> to", "== 3: dipkey, azkey = 'bed_dip', 'bed_dip_direction' else: dipkey, azkey = 'site_bed_dip', 'site_bed_dip_direction'", "pandas as pd from matplotlib import pyplot as plt import pmagpy.pmag as pmag", "only records with tilt_col 
data = df.copy() data = data[data[tilt_col].notnull()] data = data.where(data.notnull(),", "- default is svg -sav saves plots and quits -DM NUM MagIC data", "plt.ylabel('tau_1 (red), CDF (green)') Untilt.sort() # now for CDF of tilt of maximum", "in sys.argv: orfile = pmag.get_named_arg(\"-fsa\", \"\") elif '-fsi' in sys.argv: orfile = pmag.get_named_arg(\"-fsi\",", "Untilting required to maximize tau for all the bootstrapped data sets. The dashed", "def main(): \"\"\" NAME foldtest_magic.py DESCRIPTION does a fold test (Tauxe, 2010) on", "an equal area projection of the input data in original coordinates Stratigraphic: is", "import pandas as pd from matplotlib import pyplot as plt import pmagpy.pmag as", "pmagpy.pmagplotlib as pmagplotlib from pmag_env import set_env import operator OPS = {'<' :", "area projection of the input data in tilt adjusted coordinates % Untilting: The", "print(n) Taus = [] # set up lists for taus PDs = pmag.pseudo(DIDDs)", "er_samples.txt format file (for bedding) SYNTAX foldtest_magic.py [command line options] OPTIONS -h prints", "-b MIN, MAX, set bounds for untilting, default is -10, 150 -fmt FMT,", "= 1 for site_crit in SiteCrits: crit_name = site_crit['table_column'].split('.')[1] if crit_name and crit_name", "the confidence bounds on optimum untilting. If the 95% conf bounds include 0,", "other pathologies \"\"\" if '-h' in sys.argv: # check if help is needed", "cumulative distribution of the % Untilting required to maximize tau for all the", "curves are representative plots of maximum eigenvalue (tau_1) as a function of untilting", "bootstrapped data sets. The dashed vertical lines are 95% confidence bounds on the", "'-exc' in sys.argv: keep = 1 for site_crit in SiteCrits: crit_name = site_crit['table_column'].split('.')[1]", "PDs[k][2], PDs[k][3]) PDs[k][2] = dipdir PDs[k][3] = dip for perc in Percs: tilt", "= pmag.get_named_arg(\"-fsa\", \"\") elif '-fsi' in sys.argv: orfile = pmag.get_named_arg(\"-fsi\", \"\") if data_model_num", "options] OPTIONS -h prints help message and quits -f sites formatted file [default", "!= 0 and dip_dir != -1: if '-exc' in sys.argv: keep = 1", "= 'er_samples.txt' site_col = 'er_site_name' dec_col = 'site_dec' inc_col = 'site_inc' tilt_col =", "2010) on data INPUT FORMAT pmag_specimens format file, er_samples.txt format file (for bedding)", "1 else: DIDDs.append([Dec, Inc, dip_dir, dip]) if num_dropped: print(\"-W- Dropped {} records because", "2: GEOrecs = pmag.get_dictitem(data, tilt_col, '0', 'T') else: GEOrecs = data if len(GEOrecs)", "= pmag.get_named_arg(\"-fmt\", \"svg\") data_model_num = int(float(pmag.get_named_arg(\"-DM\", 3))) if data_model_num == 3: infile =", "0 and 100, syn-tilt magnetization is possible as is vertical axis rotation or", "range(len(PDs)): d, i = pmag.fshdev(kappa) dipdir, dip = pmag.dodirot(d, i, PDs[k][2], PDs[k][3]) PDs[k][2]", "= 'bed_dip', 'bed_dip_direction' else: dipkey, azkey = 'site_bed_dip', 'site_bed_dip_direction' else: if data_model_num ==", "(tau_1) as a function of untilting The solid line is the cumulative distribution", "needed print(main.__doc__) sys.exit() # graceful quit kappa = 0 dir_path = pmag.get_named_arg(\"-WD\", \".\")", "format - default is svg -sav saves plots and quits -DM NUM MagIC", "'Geographic') data = np.array(DIDDs) D, I = pmag.dotilt_V(data) TCs = np.array([D, I]).transpose() pmagplotlib.plot_eq(PLTS['strat'],", "if data_model_num == 3: dipkey, azkey = 'bed_dip', 'bed_dip_direction' else: dipkey, azkey =", "default 3) OUTPUT Geographic: is an equal area 
projection of the input data", "fold test (Tauxe, 2010) on data INPUT FORMAT pmag_specimens format file, er_samples.txt format", "1000))) # number of bootstraps fmt = pmag.get_named_arg(\"-fmt\", \"svg\") data_model_num = int(float(pmag.get_named_arg(\"-DM\", 3)))", "in sys.argv: ind = sys.argv.index('-b') untilt_min = int(sys.argv[ind+1]) untilt_max = int(sys.argv[ind+2]) else: untilt_min,", "sys.exit() files = {} for key in list(PLTS.keys()): files[key] = ('foldtest_'+'%s' % (key.strip()[:2])+'.'+fmt)", "plt.xlabel('% Untilting') plt.ylabel('tau_1 (red), CDF (green)') Untilt.sort() # now for CDF of tilt", "if ans != 'a': print(\"Good bye\") sys.exit() files = {} for key in", "= pmag.get_named_arg(\"-f\", 'pmag_sites.txt') orfile = 'er_samples.txt' site_col = 'er_site_name' dec_col = 'site_dec' inc_col", "in a sample file instead else: ordata = pd.read_csv(orfile, sep='\\t', header=1) ordata =", "-h prints help message and quits -f sites formatted file [default for 3.0", "does a fold test (Tauxe, 2010) on data INPUT FORMAT pmag_specimens format file,", "tilt adjusted coordinates % Untilting: The dashed (red) curves are representative plots of", "untilt_max = -10, 150 if '-fsa' in sys.argv: orfile = pmag.get_named_arg(\"-fsa\", \"\") elif", "record passes if op(float(rec[crit_name]), float(site_crit['criterion_value'])): keep = 0 if keep == 1: DIDDs.append([Dec,", "dip for perc in Percs: tilt = np.array([1., 1., 1., 0.01*perc]) D, I", "if dip != 0 and dip_dir != -1: if '-exc' in sys.argv: keep", "= {'<' : operator.lt, '<=' : operator.le, '>' : operator.gt, '>=': operator.ge, '=':", "is vertical axis rotation or other pathologies \"\"\" if '-h' in sys.argv: #", "of maximum eigenvalue (tau_1) as a function of untilting The solid line is", "if data_model_num == 2: GEOrecs = pmag.get_dictitem(data, tilt_col, '0', 'T') else: GEOrecs =", "'pmag_sites.txt' orfile = pmag.resolve_file_name(orfile, dir_path) infile = pmag.resolve_file_name(infile, dir_path) critfile = pmag.resolve_file_name(critfile, dir_path)", "GEOrecs = data if len(GEOrecs) > 0: # have some geographic data num_dropped", "ppars = pmag.doprinc(TCs) # get principal directions Taus.append(ppars['tau1']) if n < 25: plt.plot(Percs,", "1., 0.01*perc]) D, I = pmag.dotilt_V(PDs*tilt) TCs = np.array([D, I]).transpose() ppars = pmag.doprinc(TCs)", "= 'site_inc' tilt_col = 'site_tilt_correction' dipkey, azkey = 'sample_bed_dip', 'sample_bed_dip_direction' crit_col = 'pmag_criteria_code'", "= 'site_tilt_correction' dipkey, azkey = 'sample_bed_dip', 'sample_bed_dip_direction' crit_col = 'pmag_criteria_code' critfile = 'pmag_criteria.txt'", "I]).transpose() pmagplotlib.plot_eq(PLTS['strat'], TCs, 'Stratigraphic') if plot == 0: pmagplotlib.draw_figs(PLTS) Percs = list(range(untilt_min, untilt_max))", "\"\"\" if '-h' in sys.argv: # check if help is needed print(main.__doc__) sys.exit()", "data_model_num == 3: dipkey, azkey = 'bed_dip', 'bed_dip_direction' else: dipkey, azkey = 'site_bed_dip',", "if len(GEOrecs) > 0: # have some geographic data num_dropped = 0 DIDDs", "else: print('no geographic directional data found') sys.exit() pmagplotlib.plot_eq(PLTS['geo'], DIDDs, 'Geographic') data = np.array(DIDDs)", "\"TKAgg\": matplotlib.use(\"TKAgg\") import pandas as pd from matplotlib import pyplot as plt import", "pmag.dotilt_V(PDs*tilt) TCs = np.array([D, I]).transpose() ppars = pmag.doprinc(TCs) # get principal directions Taus.append(ppars['tau1'])", "foldtest_magic.py DESCRIPTION does a fold test (Tauxe, 2010) on data INPUT FORMAT pmag_specimens", 
"= int(sys.argv[ind+1]) untilt_max = int(sys.argv[ind+2]) else: untilt_min, untilt_max = -10, 150 if '-fsa'", "If the 95% conf bounds include 100, then a post-tilt magnetization is indicated", "Inc, dip_dir, dip]) else: num_dropped += 1 else: DIDDs.append([Dec, Inc, dip_dir, dip]) if", "3))) if data_model_num == 3: infile = pmag.get_named_arg(\"-f\", 'sites.txt') orfile = 'samples.txt' site_col", "'bed_dip_direction' else: dipkey, azkey = 'site_bed_dip', 'site_bed_dip_direction' else: if data_model_num == 3: orfile", "\"svg\") data_model_num = int(float(pmag.get_named_arg(\"-DM\", 3))) if data_model_num == 3: infile = pmag.get_named_arg(\"-f\", 'sites.txt')", "sys.exit() pmagplotlib.plot_eq(PLTS['geo'], DIDDs, 'Geographic') data = np.array(DIDDs) D, I = pmag.dotilt_V(data) TCs =", "set_env import operator OPS = {'<' : operator.lt, '<=' : operator.le, '>' :", "indicated If the 95% conf bounds include 100, then a post-tilt magnetization is", "plot = 0 if '-b' in sys.argv: ind = sys.argv.index('-b') untilt_min = int(sys.argv[ind+1])", "D, I = pmag.dotilt_V(data) TCs = np.array([D, I]).transpose() pmagplotlib.plot_eq(PLTS['strat'], TCs, 'Stratigraphic') if plot", "foldtest_magic.py [command line options] OPTIONS -h prints help message and quits -f sites", "= int(.975*nboot) plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--') plt.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--') tit", "orfile = 'sites.txt' else: orfile = 'pmag_sites.txt' orfile = pmag.resolve_file_name(orfile, dir_path) infile =", "= float(rec[dec_col]) Inc = float(rec[inc_col]) orecs = pmag.get_dictitem( ordata, site_col, rec[site_col], 'T') if", "'dir_inc' tilt_col = 'dir_tilt_correction' dipkey, azkey = 'bed_dip', 'bed_dip_direction' crit_col = 'criterion' critfile", "pmag.get_named_arg(\"-fsa\", \"\") elif '-fsi' in sys.argv: orfile = pmag.get_named_arg(\"-fsi\", \"\") if data_model_num ==", "into pmag data list data = list(data.T.apply(dict)) # get orientation data if data_model_num", "found') sys.exit() pmagplotlib.plot_eq(PLTS['geo'], DIDDs, 'Geographic') data = np.array(DIDDs) D, I = pmag.dotilt_V(data) TCs", "rec[crit_name]: # get the correct operation (<, >=, =, etc.) 
op = OPS[site_crit['criterion_operation']]", "acceptance criteria (supported only for data model 3) -n NB, set number of", "and 100, syn-tilt magnetization is possible as is vertical axis rotation or other", "'site' dec_col = 'dir_dec' inc_col = 'dir_inc' tilt_col = 'dir_tilt_correction' dipkey, azkey =", "[command line options] OPTIONS -h prints help message and quits -f sites formatted", "or more criteria\".format(num_dropped)) else: print('no geographic directional data found') sys.exit() pmagplotlib.plot_eq(PLTS['geo'], DIDDs, 'Geographic')", "in rec.keys() and rec[crit_name]: # get the correct operation (<, >=, =, etc.)", "line if n % 50 == 0: print(n) Taus = [] # set", "Taus, 'k') plt.xlabel('% Untilting') plt.ylabel('tau_1 (red), CDF (green)') Untilt.sort() # now for CDF", "0 DIDDs = [] # set up list for dec inc dip_direction, dip", "vertical lines are 95% confidence bounds on the % untilting that yields the", "dir_path) infile = pmag.resolve_file_name(infile, dir_path) critfile = pmag.resolve_file_name(critfile, dir_path) df = pd.read_csv(infile, sep='\\t',", "dip, dip_dir = 0, -1 Dec = float(rec[dec_col]) Inc = float(rec[inc_col]) orecs =", "3} # make plot dictionary if not set_env.IS_WIN: pmagplotlib.plot_init(PLTS['geo'], 5, 5) pmagplotlib.plot_init(PLTS['strat'], 5,", "The dashed (red) curves are representative plots of maximum eigenvalue (tau_1) as a", "sometimes orientation might be in a sample file instead else: ordata = pd.read_csv(orfile,", "data num_dropped = 0 DIDDs = [] # set up list for dec", "first 25 as dashed red line if n % 50 == 0: print(n)", "tilt_col = 'site_tilt_correction' dipkey, azkey = 'sample_bed_dip', 'sample_bed_dip_direction' crit_col = 'pmag_criteria_code' critfile =", "os.path.split(infile)[1]: ordata = df[df[azkey].notnull()] ordata = ordata[ordata[dipkey].notnull()] ordata = list(ordata.T.apply(dict)) # sometimes orientation", "-10, 150 -fmt FMT, specify format - default is svg -sav saves plots", "if '-sav' in sys.argv: plot = 1 else: plot = 0 if '-b'", "= float(orecs[0][dipkey]) if dip != 0 and dip_dir != -1: if '-exc' in", "# get orientation data if data_model_num == 3: # often orientation will be", "# have some geographic data num_dropped = 0 DIDDs = [] # set", "95% conf bounds include 100, then a post-tilt magnetization is indicated If the", "import sys import numpy as np import matplotlib if matplotlib.get_backend() != \"TKAgg\": matplotlib.use(\"TKAgg\")", "plots and quits -DM NUM MagIC data model number (2 or 3, default", "== \"DE-SITE\": SiteCrits.append(crit) #break # get to work # PLTS = {'geo': 1,", "n in range(nboot): # do bootstrap data sets - plot first 25 as", "!= 'a': print(\"Good bye\") sys.exit() files = {} for key in list(PLTS.keys()): files[key]", "key in list(PLTS.keys()): files[key] = ('foldtest_'+'%s' % (key.strip()[:2])+'.'+fmt) pmagplotlib.save_plots(PLTS, files) if __name__ ==", "data INPUT FORMAT pmag_specimens format file, er_samples.txt format file (for bedding) SYNTAX foldtest_magic.py", "= int(float(pmag.get_named_arg(\"-n\", 1000))) # number of bootstraps fmt = pmag.get_named_arg(\"-fmt\", \"svg\") data_model_num =", "'sample_bed_dip', 'sample_bed_dip_direction' crit_col = 'pmag_criteria_code' critfile = 'pmag_criteria.txt' if '-sav' in sys.argv: plot", "'>=': operator.ge, '=': operator.eq} def main(): \"\"\" NAME foldtest_magic.py DESCRIPTION does a fold", "is an equal area projection of the input data in tilt adjusted coordinates", "= site_crit['table_column'].split('.')[1] if crit_name and crit_name in rec.keys() 
and rec[crit_name]: # get the", "-1: if '-exc' in sys.argv: keep = 1 for site_crit in SiteCrits: crit_name", "0, then a pre-tilt magnetization is indicated If the 95% conf bounds include", "have some geographic data num_dropped = 0 DIDDs = [] # set up", "orecs[0][azkey] != \"\": dip_dir = float(orecs[0][azkey]) if orecs[0][dipkey] != \"\": dip = float(orecs[0][dipkey])", "\"\": dip_dir = float(orecs[0][azkey]) if orecs[0][dipkey] != \"\": dip = float(orecs[0][dipkey]) if dip", "nboot, ' iterations...please be patient.....') for n in range(nboot): # do bootstrap data", "import set_env import operator OPS = {'<' : operator.lt, '<=' : operator.le, '>'", "file, er_samples.txt format file (for bedding) SYNTAX foldtest_magic.py [command line options] OPTIONS -h", "operator.le, '>' : operator.gt, '>=': operator.ge, '=': operator.eq} def main(): \"\"\" NAME foldtest_magic.py", "150 -fmt FMT, specify format - default is svg -sav saves plots and", "else: num_dropped += 1 else: DIDDs.append([Dec, Inc, dip_dir, dip]) if num_dropped: print(\"-W- Dropped", "95% conf bounds include 0, then a pre-tilt magnetization is indicated If the", ">=, =, etc.) op = OPS[site_crit['criterion_operation']] # then make sure the site record", "criteria\".format(num_dropped)) else: print('no geographic directional data found') sys.exit() pmagplotlib.plot_eq(PLTS['geo'], DIDDs, 'Geographic') data =", "0.01*perc]) D, I = pmag.dotilt_V(PDs*tilt) TCs = np.array([D, I]).transpose() ppars = pmag.doprinc(TCs) #", "are representative plots of maximum eigenvalue (tau_1) as a function of untilting The", "if '-b' in sys.argv: ind = sys.argv.index('-b') untilt_min = int(sys.argv[ind+1]) untilt_max = int(sys.argv[ind+2])", "keep == 1: DIDDs.append([Dec, Inc, dip_dir, dip]) else: num_dropped += 1 else: DIDDs.append([Dec,", "0 if '-b' in sys.argv: ind = sys.argv.index('-b') untilt_min = int(sys.argv[ind+1]) untilt_max =", "Untilt.append(Percs[Taus.index(np.max(Taus))]) Cdf.append(float(n) / float(nboot)) plt.plot(Percs, Taus, 'k') plt.xlabel('% Untilting') plt.ylabel('tau_1 (red), CDF (green)')", "data.where(data.notnull(), \"\") # turn into pmag data list data = list(data.T.apply(dict)) # get", "% 50 == 0: print(n) Taus = [] # set up lists for", "dec_col = 'dir_dec' inc_col = 'dir_inc' tilt_col = 'dir_tilt_correction' dipkey, azkey = 'bed_dip',", "patient.....') for n in range(nboot): # do bootstrap data sets - plot first", "is indicated If the 95% conf bounds exclude both 0 and 100, syn-tilt", "= sys.argv.index('-b') untilt_min = int(sys.argv[ind+1]) untilt_max = int(sys.argv[ind+2]) else: untilt_min, untilt_max = -10,", "pmag.magic_read(orfile) if '-exc' in sys.argv: crits, file_type = pmag.magic_read(critfile) SiteCrits = [] for", "5) if data_model_num == 2: GEOrecs = pmag.get_dictitem(data, tilt_col, '0', 'T') else: GEOrecs", "if op(float(rec[crit_name]), float(site_crit['criterion_value'])): keep = 0 if keep == 1: DIDDs.append([Dec, Inc, dip_dir,", "python import os import sys import numpy as np import matplotlib if matplotlib.get_backend()", "if not set_env.IS_WIN: pmagplotlib.plot_init(PLTS['geo'], 5, 5) pmagplotlib.plot_init(PLTS['strat'], 5, 5) pmagplotlib.plot_init(PLTS['taus'], 5, 5) if", "dip = float(orecs[0][dipkey]) if dip != 0 and dip_dir != -1: if '-exc'", "pmagpy.pmag as pmag import pmagpy.pmagplotlib as pmagplotlib from pmag_env import set_env import operator", "pmag.resolve_file_name(critfile, dir_path) df = pd.read_csv(infile, sep='\\t', header=1) # keep only records with tilt_col", "PDs[k][3] = dip for perc in 
Percs: tilt = np.array([1., 1., 1., 0.01*perc])", "num_dropped = 0 DIDDs = [] # set up list for dec inc", "operator.gt, '>=': operator.ge, '=': operator.eq} def main(): \"\"\" NAME foldtest_magic.py DESCRIPTION does a", "'=': operator.eq} def main(): \"\"\" NAME foldtest_magic.py DESCRIPTION does a fold test (Tauxe,", "ordata = pd.read_csv(orfile, sep='\\t', header=1) ordata = list(ordata.T.apply(dict)) else: ordata, file_type = pmag.magic_read(orfile)", "'sites.txt') orfile = 'samples.txt' site_col = 'site' dec_col = 'dir_dec' inc_col = 'dir_inc'", "= list(range(untilt_min, untilt_max)) Cdf, Untilt = [], [] plt.figure(num=PLTS['taus']) print('doing ', nboot, '", "maximum tau Untilt.append(Percs[Taus.index(np.max(Taus))]) Cdf.append(float(n) / float(nboot)) plt.plot(Percs, Taus, 'k') plt.xlabel('% Untilting') plt.ylabel('tau_1 (red),", "records because each failed one or more criteria\".format(num_dropped)) else: print('no geographic directional data", "else: if data_model_num == 3: orfile = 'sites.txt' else: orfile = 'pmag_sites.txt' orfile", "np.array([1., 1., 1., 0.01*perc]) D, I = pmag.dotilt_V(PDs*tilt) TCs = np.array([D, I]).transpose() ppars", "import pmagpy.pmagplotlib as pmagplotlib from pmag_env import set_env import operator OPS = {'<'", "import pyplot as plt import pmagpy.pmag as pmag import pmagpy.pmagplotlib as pmagplotlib from", "# get the correct operation (<, >=, =, etc.) op = OPS[site_crit['criterion_operation']] #", "= np.array([1., 1., 1., 0.01*perc]) D, I = pmag.dotilt_V(PDs*tilt) TCs = np.array([D, I]).transpose()", "to work # PLTS = {'geo': 1, 'strat': 2, 'taus': 3} # make", "of the input data in original coordinates Stratigraphic: is an equal area projection", "5) pmagplotlib.plot_init(PLTS['taus'], 5, 5) if data_model_num == 2: GEOrecs = pmag.get_dictitem(data, tilt_col, '0',", "is possible as is vertical axis rotation or other pathologies \"\"\" if '-h'", "format file (for bedding) SYNTAX foldtest_magic.py [command line options] OPTIONS -h prints help", "indicated If the 95% conf bounds exclude both 0 and 100, syn-tilt magnetization", "sure the site record passes if op(float(rec[crit_name]), float(site_crit['criterion_value'])): keep = 0 if keep", "import os import sys import numpy as np import matplotlib if matplotlib.get_backend() !=", "pmag.get_named_arg(\"-fsi\", \"\") if data_model_num == 3: dipkey, azkey = 'bed_dip', 'bed_dip_direction' else: dipkey,", "make plot dictionary if not set_env.IS_WIN: pmagplotlib.plot_init(PLTS['geo'], 5, 5) pmagplotlib.plot_init(PLTS['strat'], 5, 5) pmagplotlib.plot_init(PLTS['taus'],", "'T') else: GEOrecs = data if len(GEOrecs) > 0: # have some geographic", "= 'pmag_criteria.txt' if '-sav' in sys.argv: plot = 1 else: plot = 0", "lines are 95% confidence bounds on the % untilting that yields the most", "plt.plot(Untilt, Cdf, 'g') lower = int(.025*nboot) upper = int(.975*nboot) plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1,", "'-fsa' in sys.argv: orfile = pmag.get_named_arg(\"-fsa\", \"\") elif '-fsi' in sys.argv: orfile =", "int(float(pmag.get_named_arg(\"-n\", 1000))) # number of bootstraps fmt = pmag.get_named_arg(\"-fmt\", \"svg\") data_model_num = int(float(pmag.get_named_arg(\"-DM\",", "etc.) op = OPS[site_crit['criterion_operation']] # then make sure the site record passes if", "= dipdir PDs[k][3] = dip for perc in Percs: tilt = np.array([1., 1.,", "original coordinates Stratigraphic: is an equal area projection of the input data in", "iterations and finally the confidence bounds on optimum untilting. 
If the 95% conf", "else: plot = 0 if '-b' in sys.argv: ind = sys.argv.index('-b') untilt_min =", "keep = 1 for site_crit in SiteCrits: crit_name = site_crit['table_column'].split('.')[1] if crit_name and", "Unfolding') print(tit) plt.title(tit) if plot == 0: pmagplotlib.draw_figs(PLTS) ans = input('S[a]ve all figures,", "\"\"\" NAME foldtest_magic.py DESCRIPTION does a fold test (Tauxe, 2010) on data INPUT", "len(orecs) > 0: if orecs[0][azkey] != \"\": dip_dir = float(orecs[0][azkey]) if orecs[0][dipkey] !=", "OPS[site_crit['criterion_operation']] # then make sure the site record passes if op(float(rec[crit_name]), float(site_crit['criterion_value'])): keep", "the 95% conf bounds include 100, then a post-tilt magnetization is indicated If", "float(orecs[0][dipkey]) if dip != 0 and dip_dir != -1: if '-exc' in sys.argv:", "data model 3) -n NB, set number of bootstraps, default is 1000 -b", "float(site_crit['criterion_value'])): keep = 0 if keep == 1: DIDDs.append([Dec, Inc, dip_dir, dip]) else:", "3: dipkey, azkey = 'bed_dip', 'bed_dip_direction' else: dipkey, azkey = 'site_bed_dip', 'site_bed_dip_direction' else:", "directions Taus.append(ppars['tau1']) if n < 25: plt.plot(Percs, Taus, 'r--') # tilt that gives", "sys.argv: plot = 1 else: plot = 0 if '-b' in sys.argv: ind", "criteria to set acceptance criteria (supported only for data model 3) -n NB,", "= {'geo': 1, 'strat': 2, 'taus': 3} # make plot dictionary if not", "0: print(n) Taus = [] # set up lists for taus PDs =", "!= \"\": dip_dir = float(orecs[0][azkey]) if orecs[0][dipkey] != \"\": dip = float(orecs[0][dipkey]) if", "Cdf, 'g') lower = int(.025*nboot) upper = int(.975*nboot) plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--')", "all figures, <Return> to quit \\n ') if ans != 'a': print(\"Good bye\")", "= 'pmag_sites.txt' orfile = pmag.resolve_file_name(orfile, dir_path) infile = pmag.resolve_file_name(infile, dir_path) critfile = pmag.resolve_file_name(critfile,", "Command line: prints out the bootstrapped iterations and finally the confidence bounds on", "crit_name = site_crit['table_column'].split('.')[1] if crit_name and crit_name in rec.keys() and rec[crit_name]: # get", "print('doing ', nboot, ' iterations...please be patient.....') for n in range(nboot): # do", "formatted file [default for 3.0 is sites.txt, for 2.5, pmag_sites.txt] -fsa samples formatted", "!= 0: for k in range(len(PDs)): d, i = pmag.fshdev(kappa) dipdir, dip =", "is indicated If the 95% conf bounds include 100, then a post-tilt magnetization", "upper = int(.975*nboot) plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--') plt.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--')", "'pmag_criteria.txt' if '-sav' in sys.argv: plot = 1 else: plot = 0 if", "= data[data[tilt_col].notnull()] data = data.where(data.notnull(), \"\") # turn into pmag data list data", "crit_col = 'criterion' critfile = 'criteria.txt' else: infile = pmag.get_named_arg(\"-f\", 'pmag_sites.txt') orfile =", "I]).transpose() ppars = pmag.doprinc(TCs) # get principal directions Taus.append(ppars['tau1']) if n < 25:", "'>' : operator.gt, '>=': operator.ge, '=': operator.eq} def main(): \"\"\" NAME foldtest_magic.py DESCRIPTION", "finally the confidence bounds on optimum untilting. 
If the 95% conf bounds include", "file instead else: ordata = pd.read_csv(orfile, sep='\\t', header=1) ordata = list(ordata.T.apply(dict)) else: ordata,", "'Percent Unfolding') print(tit) plt.title(tit) if plot == 0: pmagplotlib.draw_figs(PLTS) ans = input('S[a]ve all", "data = np.array(DIDDs) D, I = pmag.dotilt_V(data) TCs = np.array([D, I]).transpose() pmagplotlib.plot_eq(PLTS['strat'], TCs,", "(Tauxe, 2010) on data INPUT FORMAT pmag_specimens format file, er_samples.txt format file (for", "as plt import pmagpy.pmag as pmag import pmagpy.pmagplotlib as pmagplotlib from pmag_env import", ": operator.le, '>' : operator.gt, '>=': operator.ge, '=': operator.eq} def main(): \"\"\" NAME", "df.copy() data = data[data[tilt_col].notnull()] data = data.where(data.notnull(), \"\") # turn into pmag data", "then a post-tilt magnetization is indicated If the 95% conf bounds exclude both", "# set up list for dec inc dip_direction, dip for rec in GEOrecs:", "matplotlib.use(\"TKAgg\") import pandas as pd from matplotlib import pyplot as plt import pmagpy.pmag", "lists for taus PDs = pmag.pseudo(DIDDs) if kappa != 0: for k in", "dictionary if not set_env.IS_WIN: pmagplotlib.plot_init(PLTS['geo'], 5, 5) pmagplotlib.plot_init(PLTS['strat'], 5, 5) pmagplotlib.plot_init(PLTS['taus'], 5, 5)", "samples formatted file -fsi sites formatted file -exc use criteria to set acceptance", "quits -f sites formatted file [default for 3.0 is sites.txt, for 2.5, pmag_sites.txt]", "# number of bootstraps fmt = pmag.get_named_arg(\"-fmt\", \"svg\") data_model_num = int(float(pmag.get_named_arg(\"-DM\", 3))) if", "sep='\\t', header=1) ordata = list(ordata.T.apply(dict)) else: ordata, file_type = pmag.magic_read(orfile) if '-exc' in", "(sites table) if os.path.split(orfile)[1] == os.path.split(infile)[1]: ordata = df[df[azkey].notnull()] ordata = ordata[ordata[dipkey].notnull()] ordata", "= pmag.magic_read(critfile) SiteCrits = [] for crit in crits: if crit[crit_col] == \"DE-SITE\":", "'sample_bed_dip_direction' crit_col = 'pmag_criteria_code' critfile = 'pmag_criteria.txt' if '-sav' in sys.argv: plot =", "untilt_max)) Cdf, Untilt = [], [] plt.figure(num=PLTS['taus']) print('doing ', nboot, ' iterations...please be", "os import sys import numpy as np import matplotlib if matplotlib.get_backend() != \"TKAgg\":", "fmt = pmag.get_named_arg(\"-fmt\", \"svg\") data_model_num = int(float(pmag.get_named_arg(\"-DM\", 3))) if data_model_num == 3: infile", "list(data.T.apply(dict)) # get orientation data if data_model_num == 3: # often orientation will", "data sets. 
The dashed vertical lines are 95% confidence bounds on the %", "pmag.resolve_file_name(orfile, dir_path) infile = pmag.resolve_file_name(infile, dir_path) critfile = pmag.resolve_file_name(critfile, dir_path) df = pd.read_csv(infile,", "% (Untilt[lower], Untilt[upper], 'Percent Unfolding') print(tit) plt.title(tit) if plot == 0: pmagplotlib.draw_figs(PLTS) ans", "25 as dashed red line if n % 50 == 0: print(n) Taus", "len(GEOrecs) > 0: # have some geographic data num_dropped = 0 DIDDs =", "is 1000 -b MIN, MAX, set bounds for untilting, default is -10, 150", "from pmag_env import set_env import operator OPS = {'<' : operator.lt, '<=' :", "Untilting') plt.ylabel('tau_1 (red), CDF (green)') Untilt.sort() # now for CDF of tilt of", "MAX, set bounds for untilting, default is -10, 150 -fmt FMT, specify format", "') if ans != 'a': print(\"Good bye\") sys.exit() files = {} for key", "= float(rec[inc_col]) orecs = pmag.get_dictitem( ordata, site_col, rec[site_col], 'T') if len(orecs) > 0:", "the bootstrapped data sets. The dashed vertical lines are 95% confidence bounds on", "orecs[0][dipkey] != \"\": dip = float(orecs[0][dipkey]) if dip != 0 and dip_dir !=", "import matplotlib if matplotlib.get_backend() != \"TKAgg\": matplotlib.use(\"TKAgg\") import pandas as pd from matplotlib", "help is needed print(main.__doc__) sys.exit() # graceful quit kappa = 0 dir_path =", "= list(data.T.apply(dict)) # get orientation data if data_model_num == 3: # often orientation", "data if data_model_num == 3: # often orientation will be in infile (sites", "'0', 'T') else: GEOrecs = data if len(GEOrecs) > 0: # have some", "number (2 or 3, default 3) OUTPUT Geographic: is an equal area projection", "# parse data dip, dip_dir = 0, -1 Dec = float(rec[dec_col]) Inc =", "float(orecs[0][azkey]) if orecs[0][dipkey] != \"\": dip = float(orecs[0][dipkey]) if dip != 0 and", "Cdf.append(float(n) / float(nboot)) plt.plot(Percs, Taus, 'k') plt.xlabel('% Untilting') plt.ylabel('tau_1 (red), CDF (green)') Untilt.sort()", "conf bounds include 100, then a post-tilt magnetization is indicated If the 95%", "principal directions Taus.append(ppars['tau1']) if n < 25: plt.plot(Percs, Taus, 'r--') # tilt that", "tilt_col data = df.copy() data = data[data[tilt_col].notnull()] data = data.where(data.notnull(), \"\") # turn", "= ordata[ordata[dipkey].notnull()] ordata = list(ordata.T.apply(dict)) # sometimes orientation might be in a sample", "lower = int(.025*nboot) upper = int(.975*nboot) plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--') plt.axvline(x=Untilt[upper], ymin=0,", "site_crit in SiteCrits: crit_name = site_crit['table_column'].split('.')[1] if crit_name and crit_name in rec.keys() and", "bounds on optimum untilting. 
If the 95% conf bounds include 0, then a", "0 dir_path = pmag.get_named_arg(\"-WD\", \".\") nboot = int(float(pmag.get_named_arg(\"-n\", 1000))) # number of bootstraps", "operator.eq} def main(): \"\"\" NAME foldtest_magic.py DESCRIPTION does a fold test (Tauxe, 2010)", "orientation data if data_model_num == 3: # often orientation will be in infile", "3) -n NB, set number of bootstraps, default is 1000 -b MIN, MAX,", "and dip_dir != -1: if '-exc' in sys.argv: keep = 1 for site_crit", "%s' % (Untilt[lower], Untilt[upper], 'Percent Unfolding') print(tit) plt.title(tit) if plot == 0: pmagplotlib.draw_figs(PLTS)", "infile = pmag.resolve_file_name(infile, dir_path) critfile = pmag.resolve_file_name(critfile, dir_path) df = pd.read_csv(infile, sep='\\t', header=1)", "/ float(nboot)) plt.plot(Percs, Taus, 'k') plt.xlabel('% Untilting') plt.ylabel('tau_1 (red), CDF (green)') Untilt.sort() #", "i, PDs[k][2], PDs[k][3]) PDs[k][2] = dipdir PDs[k][3] = dip for perc in Percs:", "tit = '%i - %i %s' % (Untilt[lower], Untilt[upper], 'Percent Unfolding') print(tit) plt.title(tit)", "#break # get to work # PLTS = {'geo': 1, 'strat': 2, 'taus':", "for n in range(nboot): # do bootstrap data sets - plot first 25", "on data INPUT FORMAT pmag_specimens format file, er_samples.txt format file (for bedding) SYNTAX", "in sys.argv: # check if help is needed print(main.__doc__) sys.exit() # graceful quit", "ymin=0, ymax=1, linewidth=1, linestyle='--') plt.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--') tit = '%i -", "FMT, specify format - default is svg -sav saves plots and quits -DM", "area projection of the input data in original coordinates Stratigraphic: is an equal", "= pd.read_csv(orfile, sep='\\t', header=1) ordata = list(ordata.T.apply(dict)) else: ordata, file_type = pmag.magic_read(orfile) if", "Taus, 'r--') # tilt that gives maximum tau Untilt.append(Percs[Taus.index(np.max(Taus))]) Cdf.append(float(n) / float(nboot)) plt.plot(Percs,", "tau plt.plot(Untilt, Cdf, 'g') lower = int(.025*nboot) upper = int(.975*nboot) plt.axvline(x=Untilt[lower], ymin=0, ymax=1,", "confidence bounds on optimum untilting. If the 95% conf bounds include 0, then", "+= 1 else: DIDDs.append([Dec, Inc, dip_dir, dip]) if num_dropped: print(\"-W- Dropped {} records", "untilt_min = int(sys.argv[ind+1]) untilt_max = int(sys.argv[ind+2]) else: untilt_min, untilt_max = -10, 150 if", "in sys.argv: crits, file_type = pmag.magic_read(critfile) SiteCrits = [] for crit in crits:", "orecs = pmag.get_dictitem( ordata, site_col, rec[site_col], 'T') if len(orecs) > 0: if orecs[0][azkey]", "up list for dec inc dip_direction, dip for rec in GEOrecs: # parse", "n < 25: plt.plot(Percs, Taus, 'r--') # tilt that gives maximum tau Untilt.append(Percs[Taus.index(np.max(Taus))])", "in Percs: tilt = np.array([1., 1., 1., 0.01*perc]) D, I = pmag.dotilt_V(PDs*tilt) TCs", "in GEOrecs: # parse data dip, dip_dir = 0, -1 Dec = float(rec[dec_col])", "> 0: if orecs[0][azkey] != \"\": dip_dir = float(orecs[0][azkey]) if orecs[0][dipkey] != \"\":", "tau for all the bootstrapped data sets. 
The dashed vertical lines are 95%", "maximum eigenvalue (tau_1) as a function of untilting The solid line is the", "float(nboot)) plt.plot(Percs, Taus, 'k') plt.xlabel('% Untilting') plt.ylabel('tau_1 (red), CDF (green)') Untilt.sort() # now", "set acceptance criteria (supported only for data model 3) -n NB, set number", "num_dropped: print(\"-W- Dropped {} records because each failed one or more criteria\".format(num_dropped)) else:", "only for data model 3) -n NB, set number of bootstraps, default is", "I = pmag.dotilt_V(data) TCs = np.array([D, I]).transpose() pmagplotlib.plot_eq(PLTS['strat'], TCs, 'Stratigraphic') if plot ==", "TCs = np.array([D, I]).transpose() ppars = pmag.doprinc(TCs) # get principal directions Taus.append(ppars['tau1']) if", "= 'samples.txt' site_col = 'site' dec_col = 'dir_dec' inc_col = 'dir_inc' tilt_col =", "inc dip_direction, dip for rec in GEOrecs: # parse data dip, dip_dir =", "quit \\n ') if ans != 'a': print(\"Good bye\") sys.exit() files = {}", "maximize tau for all the bootstrapped data sets. The dashed vertical lines are", "set bounds for untilting, default is -10, 150 -fmt FMT, specify format -", "\"DE-SITE\": SiteCrits.append(crit) #break # get to work # PLTS = {'geo': 1, 'strat':", "pmagplotlib.draw_figs(PLTS) Percs = list(range(untilt_min, untilt_max)) Cdf, Untilt = [], [] plt.figure(num=PLTS['taus']) print('doing ',", "Untilt = [], [] plt.figure(num=PLTS['taus']) print('doing ', nboot, ' iterations...please be patient.....') for", "dip = pmag.dodirot(d, i, PDs[k][2], PDs[k][3]) PDs[k][2] = dipdir PDs[k][3] = dip for", "rotation or other pathologies \"\"\" if '-h' in sys.argv: # check if help", "from matplotlib import pyplot as plt import pmagpy.pmag as pmag import pmagpy.pmagplotlib as", "quits -DM NUM MagIC data model number (2 or 3, default 3) OUTPUT", "(2 or 3, default 3) OUTPUT Geographic: is an equal area projection of", "pmagplotlib.draw_figs(PLTS) ans = input('S[a]ve all figures, <Return> to quit \\n ') if ans", "magnetization is indicated If the 95% conf bounds include 100, then a post-tilt", "axis rotation or other pathologies \"\"\" if '-h' in sys.argv: # check if", "\"\") elif '-fsi' in sys.argv: orfile = pmag.get_named_arg(\"-fsi\", \"\") if data_model_num == 3:", "svg -sav saves plots and quits -DM NUM MagIC data model number (2", "pathologies \"\"\" if '-h' in sys.argv: # check if help is needed print(main.__doc__)", "5, 5) pmagplotlib.plot_init(PLTS['strat'], 5, 5) pmagplotlib.plot_init(PLTS['taus'], 5, 5) if data_model_num == 2: GEOrecs", "help message and quits -f sites formatted file [default for 3.0 is sites.txt,", "np.array([D, I]).transpose() pmagplotlib.plot_eq(PLTS['strat'], TCs, 'Stratigraphic') if plot == 0: pmagplotlib.draw_figs(PLTS) Percs = list(range(untilt_min,", "in sys.argv: plot = 1 else: plot = 0 if '-b' in sys.argv:", "elif '-fsi' in sys.argv: orfile = pmag.get_named_arg(\"-fsi\", \"\") if data_model_num == 3: dipkey,", "is an equal area projection of the input data in original coordinates Stratigraphic:", "is sites.txt, for 2.5, pmag_sites.txt] -fsa samples formatted file -fsi sites formatted file", "50 == 0: print(n) Taus = [] # set up lists for taus", "that yields the most clustered result (maximum tau_1). 
Command line: prints out the", "data_model_num == 2: GEOrecs = pmag.get_dictitem(data, tilt_col, '0', 'T') else: GEOrecs = data", "for CDF of tilt of maximum tau plt.plot(Untilt, Cdf, 'g') lower = int(.025*nboot)", "[] # set up lists for taus PDs = pmag.pseudo(DIDDs) if kappa !=", "DESCRIPTION does a fold test (Tauxe, 2010) on data INPUT FORMAT pmag_specimens format", "int(float(pmag.get_named_arg(\"-DM\", 3))) if data_model_num == 3: infile = pmag.get_named_arg(\"-f\", 'sites.txt') orfile = 'samples.txt'", "\"\": dip = float(orecs[0][dipkey]) if dip != 0 and dip_dir != -1: if", "crit_name and crit_name in rec.keys() and rec[crit_name]: # get the correct operation (<,", "iterations...please be patient.....') for n in range(nboot): # do bootstrap data sets -", "sites formatted file [default for 3.0 is sites.txt, for 2.5, pmag_sites.txt] -fsa samples", "crits: if crit[crit_col] == \"DE-SITE\": SiteCrits.append(crit) #break # get to work # PLTS", "red line if n % 50 == 0: print(n) Taus = [] #", "file [default for 3.0 is sites.txt, for 2.5, pmag_sites.txt] -fsa samples formatted file", "bootstraps fmt = pmag.get_named_arg(\"-fmt\", \"svg\") data_model_num = int(float(pmag.get_named_arg(\"-DM\", 3))) if data_model_num == 3:", "in sys.argv: orfile = pmag.get_named_arg(\"-fsi\", \"\") if data_model_num == 3: dipkey, azkey =", "ordata, file_type = pmag.magic_read(orfile) if '-exc' in sys.argv: crits, file_type = pmag.magic_read(critfile) SiteCrits", "keep only records with tilt_col data = df.copy() data = data[data[tilt_col].notnull()] data =", "ans != 'a': print(\"Good bye\") sys.exit() files = {} for key in list(PLTS.keys()):", "PDs[k][3]) PDs[k][2] = dipdir PDs[k][3] = dip for perc in Percs: tilt =", "bootstrap data sets - plot first 25 as dashed red line if n", "5) pmagplotlib.plot_init(PLTS['strat'], 5, 5) pmagplotlib.plot_init(PLTS['taus'], 5, 5) if data_model_num == 2: GEOrecs =", "Inc, dip_dir, dip]) if num_dropped: print(\"-W- Dropped {} records because each failed one", "plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--') plt.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--') tit = '%i", "the cumulative distribution of the % Untilting required to maximize tau for all", "specify format - default is svg -sav saves plots and quits -DM NUM", "one or more criteria\".format(num_dropped)) else: print('no geographic directional data found') sys.exit() pmagplotlib.plot_eq(PLTS['geo'], DIDDs,", "sys.argv: # check if help is needed print(main.__doc__) sys.exit() # graceful quit kappa", "# now for CDF of tilt of maximum tau plt.plot(Untilt, Cdf, 'g') lower", "= -10, 150 if '-fsa' in sys.argv: orfile = pmag.get_named_arg(\"-fsa\", \"\") elif '-fsi'", "tau_1). Command line: prints out the bootstrapped iterations and finally the confidence bounds", "include 0, then a pre-tilt magnetization is indicated If the 95% conf bounds", "as is vertical axis rotation or other pathologies \"\"\" if '-h' in sys.argv:", "# do bootstrap data sets - plot first 25 as dashed red line", "for perc in Percs: tilt = np.array([1., 1., 1., 0.01*perc]) D, I =", "correct operation (<, >=, =, etc.) op = OPS[site_crit['criterion_operation']] # then make sure", "sets. 
The dashed vertical lines are 95% confidence bounds on the % untilting", "list(PLTS.keys()): files[key] = ('foldtest_'+'%s' % (key.strip()[:2])+'.'+fmt) pmagplotlib.save_plots(PLTS, files) if __name__ == \"__main__\": main()", "for key in list(PLTS.keys()): files[key] = ('foldtest_'+'%s' % (key.strip()[:2])+'.'+fmt) pmagplotlib.save_plots(PLTS, files) if __name__", "message and quits -f sites formatted file [default for 3.0 is sites.txt, for", "confidence bounds on the % untilting that yields the most clustered result (maximum", "then make sure the site record passes if op(float(rec[crit_name]), float(site_crit['criterion_value'])): keep = 0", "= pmag.fshdev(kappa) dipdir, dip = pmag.dodirot(d, i, PDs[k][2], PDs[k][3]) PDs[k][2] = dipdir PDs[k][3]", "if data_model_num == 3: # often orientation will be in infile (sites table)", "If the 95% conf bounds include 0, then a pre-tilt magnetization is indicated", "# check if help is needed print(main.__doc__) sys.exit() # graceful quit kappa =", "list data = list(data.T.apply(dict)) # get orientation data if data_model_num == 3: #", "= pmag.resolve_file_name(critfile, dir_path) df = pd.read_csv(infile, sep='\\t', header=1) # keep only records with", "= pmag.magic_read(orfile) if '-exc' in sys.argv: crits, file_type = pmag.magic_read(critfile) SiteCrits = []", "'bed_dip', 'bed_dip_direction' else: dipkey, azkey = 'site_bed_dip', 'site_bed_dip_direction' else: if data_model_num == 3:", "a fold test (Tauxe, 2010) on data INPUT FORMAT pmag_specimens format file, er_samples.txt", "tilt_col, '0', 'T') else: GEOrecs = data if len(GEOrecs) > 0: # have", "-10, 150 if '-fsa' in sys.argv: orfile = pmag.get_named_arg(\"-fsa\", \"\") elif '-fsi' in", "'er_samples.txt' site_col = 'er_site_name' dec_col = 'site_dec' inc_col = 'site_inc' tilt_col = 'site_tilt_correction'", "np.array(DIDDs) D, I = pmag.dotilt_V(data) TCs = np.array([D, I]).transpose() pmagplotlib.plot_eq(PLTS['strat'], TCs, 'Stratigraphic') if", "== 0: pmagplotlib.draw_figs(PLTS) ans = input('S[a]ve all figures, <Return> to quit \\n ')", "plot first 25 as dashed red line if n % 50 == 0:", "list(ordata.T.apply(dict)) else: ordata, file_type = pmag.magic_read(orfile) if '-exc' in sys.argv: crits, file_type =", "Untilt[upper], 'Percent Unfolding') print(tit) plt.title(tit) if plot == 0: pmagplotlib.draw_figs(PLTS) ans = input('S[a]ve", "eigenvalue (tau_1) as a function of untilting The solid line is the cumulative", "to maximize tau for all the bootstrapped data sets. The dashed vertical lines", "3: infile = pmag.get_named_arg(\"-f\", 'sites.txt') orfile = 'samples.txt' site_col = 'site' dec_col =", "dir_path) df = pd.read_csv(infile, sep='\\t', header=1) # keep only records with tilt_col data", "some geographic data num_dropped = 0 DIDDs = [] # set up list", "for all the bootstrapped data sets. The dashed vertical lines are 95% confidence", "0: if orecs[0][azkey] != \"\": dip_dir = float(orecs[0][azkey]) if orecs[0][dipkey] != \"\": dip", "bye\") sys.exit() files = {} for key in list(PLTS.keys()): files[key] = ('foldtest_'+'%s' %", "(<, >=, =, etc.) 
op = OPS[site_crit['criterion_operation']] # then make sure the site", "dip_dir != -1: if '-exc' in sys.argv: keep = 1 for site_crit in", ": operator.lt, '<=' : operator.le, '>' : operator.gt, '>=': operator.ge, '=': operator.eq} def", "# set up lists for taus PDs = pmag.pseudo(DIDDs) if kappa != 0:", "'-fsi' in sys.argv: orfile = pmag.get_named_arg(\"-fsi\", \"\") if data_model_num == 3: dipkey, azkey", "azkey = 'bed_dip', 'bed_dip_direction' else: dipkey, azkey = 'site_bed_dip', 'site_bed_dip_direction' else: if data_model_num", "k in range(len(PDs)): d, i = pmag.fshdev(kappa) dipdir, dip = pmag.dodirot(d, i, PDs[k][2],", "pmagplotlib.plot_eq(PLTS['strat'], TCs, 'Stratigraphic') if plot == 0: pmagplotlib.draw_figs(PLTS) Percs = list(range(untilt_min, untilt_max)) Cdf,", "OUTPUT Geographic: is an equal area projection of the input data in original", "for data model 3) -n NB, set number of bootstraps, default is 1000", "site record passes if op(float(rec[crit_name]), float(site_crit['criterion_value'])): keep = 0 if keep == 1:", "op(float(rec[crit_name]), float(site_crit['criterion_value'])): keep = 0 if keep == 1: DIDDs.append([Dec, Inc, dip_dir, dip])", "= 0 if keep == 1: DIDDs.append([Dec, Inc, dip_dir, dip]) else: num_dropped +=", "set_env.IS_WIN: pmagplotlib.plot_init(PLTS['geo'], 5, 5) pmagplotlib.plot_init(PLTS['strat'], 5, 5) pmagplotlib.plot_init(PLTS['taus'], 5, 5) if data_model_num ==", "= [], [] plt.figure(num=PLTS['taus']) print('doing ', nboot, ' iterations...please be patient.....') for n", "set up lists for taus PDs = pmag.pseudo(DIDDs) if kappa != 0: for", "=, etc.) op = OPS[site_crit['criterion_operation']] # then make sure the site record passes", "-n NB, set number of bootstraps, default is 1000 -b MIN, MAX, set", "for 2.5, pmag_sites.txt] -fsa samples formatted file -fsi sites formatted file -exc use", "as pmag import pmagpy.pmagplotlib as pmagplotlib from pmag_env import set_env import operator OPS", "= np.array([D, I]).transpose() ppars = pmag.doprinc(TCs) # get principal directions Taus.append(ppars['tau1']) if n", "prints help message and quits -f sites formatted file [default for 3.0 is", "ymin=0, ymax=1, linewidth=1, linestyle='--') tit = '%i - %i %s' % (Untilt[lower], Untilt[upper],", "'<=' : operator.le, '>' : operator.gt, '>=': operator.ge, '=': operator.eq} def main(): \"\"\"", "pmag.get_dictitem( ordata, site_col, rec[site_col], 'T') if len(orecs) > 0: if orecs[0][azkey] != \"\":", "FORMAT pmag_specimens format file, er_samples.txt format file (for bedding) SYNTAX foldtest_magic.py [command line", "data_model_num == 3: orfile = 'sites.txt' else: orfile = 'pmag_sites.txt' orfile = pmag.resolve_file_name(orfile,", "== os.path.split(infile)[1]: ordata = df[df[azkey].notnull()] ordata = ordata[ordata[dipkey].notnull()] ordata = list(ordata.T.apply(dict)) # sometimes", "pmagplotlib from pmag_env import set_env import operator OPS = {'<' : operator.lt, '<='", "ordata, site_col, rec[site_col], 'T') if len(orecs) > 0: if orecs[0][azkey] != \"\": dip_dir", "if '-h' in sys.argv: # check if help is needed print(main.__doc__) sys.exit() #", "conf bounds include 0, then a pre-tilt magnetization is indicated If the 95%", "'bed_dip', 'bed_dip_direction' crit_col = 'criterion' critfile = 'criteria.txt' else: infile = pmag.get_named_arg(\"-f\", 'pmag_sites.txt')", "make sure the site record passes if op(float(rec[crit_name]), float(site_crit['criterion_value'])): keep = 0 if", "if data_model_num == 3: orfile = 'sites.txt' else: orfile = 'pmag_sites.txt' orfile =", "as pd 
from matplotlib import pyplot as plt import pmagpy.pmag as pmag import", "3) OUTPUT Geographic: is an equal area projection of the input data in", "[] # set up list for dec inc dip_direction, dip for rec in", "keep = 0 if keep == 1: DIDDs.append([Dec, Inc, dip_dir, dip]) else: num_dropped", "often orientation will be in infile (sites table) if os.path.split(orfile)[1] == os.path.split(infile)[1]: ordata", "file_type = pmag.magic_read(critfile) SiteCrits = [] for crit in crits: if crit[crit_col] ==", "DIDDs = [] # set up list for dec inc dip_direction, dip for", "the input data in original coordinates Stratigraphic: is an equal area projection of", "# often orientation will be in infile (sites table) if os.path.split(orfile)[1] == os.path.split(infile)[1]:", "crit[crit_col] == \"DE-SITE\": SiteCrits.append(crit) #break # get to work # PLTS = {'geo':", "plt.plot(Percs, Taus, 'r--') # tilt that gives maximum tau Untilt.append(Percs[Taus.index(np.max(Taus))]) Cdf.append(float(n) / float(nboot))", "CDF of tilt of maximum tau plt.plot(Untilt, Cdf, 'g') lower = int(.025*nboot) upper", "operator.lt, '<=' : operator.le, '>' : operator.gt, '>=': operator.ge, '=': operator.eq} def main():", "bounds on the % untilting that yields the most clustered result (maximum tau_1).", "sep='\\t', header=1) # keep only records with tilt_col data = df.copy() data =", "infile = pmag.get_named_arg(\"-f\", 'pmag_sites.txt') orfile = 'er_samples.txt' site_col = 'er_site_name' dec_col = 'site_dec'", "NUM MagIC data model number (2 or 3, default 3) OUTPUT Geographic: is", "== 3: # often orientation will be in infile (sites table) if os.path.split(orfile)[1]", "the 95% conf bounds include 0, then a pre-tilt magnetization is indicated If", "equal area projection of the input data in original coordinates Stratigraphic: is an", "solid line is the cumulative distribution of the % Untilting required to maximize", "tilt_col = 'dir_tilt_correction' dipkey, azkey = 'bed_dip', 'bed_dip_direction' crit_col = 'criterion' critfile =", "if num_dropped: print(\"-W- Dropped {} records because each failed one or more criteria\".format(num_dropped))", "'g') lower = int(.025*nboot) upper = int(.975*nboot) plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--') plt.axvline(x=Untilt[upper],", "out the bootstrapped iterations and finally the confidence bounds on optimum untilting. If", "dec inc dip_direction, dip for rec in GEOrecs: # parse data dip, dip_dir", "Untilting: The dashed (red) curves are representative plots of maximum eigenvalue (tau_1) as", "critfile = 'pmag_criteria.txt' if '-sav' in sys.argv: plot = 1 else: plot =", "main(): \"\"\" NAME foldtest_magic.py DESCRIPTION does a fold test (Tauxe, 2010) on data", "# graceful quit kappa = 0 dir_path = pmag.get_named_arg(\"-WD\", \".\") nboot = int(float(pmag.get_named_arg(\"-n\",", "n % 50 == 0: print(n) Taus = [] # set up lists", "int(.975*nboot) plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--') plt.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--') tit =", "if orecs[0][azkey] != \"\": dip_dir = float(orecs[0][azkey]) if orecs[0][dipkey] != \"\": dip =", "'-sav' in sys.argv: plot = 1 else: plot = 0 if '-b' in", "ordata = list(ordata.T.apply(dict)) else: ordata, file_type = pmag.magic_read(orfile) if '-exc' in sys.argv: crits,", "the correct operation (<, >=, =, etc.) 
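
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of foldtest_magic.py): how the OPS lookup above
# turns one MagIC criteria record into a pass/fail test for a site record, the
# same pattern main() applies when '-exc' is given.  The criterion and the site
# values in the usage comment are invented for demonstration only.
def site_fails_criterion(site_rec, criterion):
    """True if the site record trips the rejection criterion (record dropped)."""
    col = criterion['table_column'].split('.')[1]      # e.g. 'dir_alpha95'
    if col and col in site_rec and site_rec[col]:
        op = OPS[criterion['criterion_operation']]     # e.g. operator.gt for '>'
        return op(float(site_rec[col]), float(criterion['criterion_value']))
    return False                                       # nothing to test against

# Usage sketch (hypothetical values):
#   crit = {'table_column': 'sites.dir_alpha95',
#           'criterion_operation': '>', 'criterion_value': '5'}
#   site_fails_criterion({'dir_alpha95': '7.2'}, crit)  ->  True  (dropped)
#   site_fails_criterion({'dir_alpha95': '3.1'}, crit)  ->  False (kept)
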
def main():
    """
    NAME
        foldtest_magic.py

    DESCRIPTION
        does a fold test (Tauxe, 2010) on data

    INPUT FORMAT
        pmag_specimens format file, er_samples.txt format file (for bedding)

    SYNTAX
        foldtest_magic.py [command line options]

    OPTIONS
        -h prints help message and quits
        -f sites formatted file [default for 3.0 is sites.txt, for 2.5, pmag_sites.txt]
        -fsa samples formatted file
        -fsi sites formatted file
        -exc use criteria to set acceptance criteria (supported only for data model 3)
        -n NB, set number of bootstraps, default is 1000
        -b MIN, MAX, set bounds for untilting, default is -10, 150
        -fmt FMT, specify format - default is svg
        -sav saves plots and quits
        -DM NUM MagIC data model number (2 or 3, default 3)

    OUTPUT
        Geographic: is an equal area projection of the input data in
                    original coordinates
        Stratigraphic: is an equal area projection of the input data in
                    tilt adjusted coordinates
        % Untilting: The dashed (red) curves are representative plots of
                    maximum eigenvalue (tau_1) as a function of untilting.
                    The solid line is the cumulative distribution of the
                    % Untilting required to maximize tau for all the
                    bootstrapped data sets.  The dashed vertical lines are
                    95% confidence bounds on the % untilting that yields
                    the most clustered result (maximum tau_1).
        Command line: prints out the bootstrapped iterations and finally
                    the confidence bounds on optimum untilting.
        If the 95% conf bounds include 0, then a pre-tilt magnetization is indicated
        If the 95% conf bounds include 100, then a post-tilt magnetization is indicated
        If the 95% conf bounds exclude both 0 and 100, syn-tilt magnetization is
                    possible as is vertical axis rotation or other pathologies
    """
    if '-h' in sys.argv:  # check if help is needed
        print(main.__doc__)
        sys.exit()  # graceful quit
    kappa = 0
    dir_path = pmag.get_named_arg("-WD", ".")
    nboot = int(float(pmag.get_named_arg("-n", 1000)))  # number of bootstraps
    fmt = pmag.get_named_arg("-fmt", "svg")
    data_model_num = int(float(pmag.get_named_arg("-DM", 3)))
    if data_model_num == 3:
        infile = pmag.get_named_arg("-f", 'sites.txt')
        orfile = 'samples.txt'
        site_col = 'site'
        dec_col = 'dir_dec'
        inc_col = 'dir_inc'
        tilt_col = 'dir_tilt_correction'
        dipkey, azkey = 'bed_dip', 'bed_dip_direction'
        crit_col = 'criterion'
        critfile = 'criteria.txt'
    else:
        infile = pmag.get_named_arg("-f", 'pmag_sites.txt')
        orfile = 'er_samples.txt'
        site_col = 'er_site_name'
        dec_col = 'site_dec'
        inc_col = 'site_inc'
        tilt_col = 'site_tilt_correction'
        dipkey, azkey = 'sample_bed_dip', 'sample_bed_dip_direction'
        crit_col = 'pmag_criteria_code'
        critfile = 'pmag_criteria.txt'
    if '-sav' in sys.argv:
        plot = 1
    else:
        plot = 0
    if '-b' in sys.argv:
        ind = sys.argv.index('-b')
        untilt_min = int(sys.argv[ind+1])
        untilt_max = int(sys.argv[ind+2])
    else:
        untilt_min, untilt_max = -10, 150
    if '-fsa' in sys.argv:
        orfile = pmag.get_named_arg("-fsa", "")
    elif '-fsi' in sys.argv:
        orfile = pmag.get_named_arg("-fsi", "")
        if data_model_num == 3:
            dipkey, azkey = 'bed_dip', 'bed_dip_direction'
        else:
            dipkey, azkey = 'site_bed_dip', 'site_bed_dip_direction'
    else:
        if data_model_num == 3:
            orfile = 'sites.txt'
        else:
            orfile = 'pmag_sites.txt'
    orfile = pmag.resolve_file_name(orfile, dir_path)
    infile = pmag.resolve_file_name(infile, dir_path)
    critfile = pmag.resolve_file_name(critfile, dir_path)
    df = pd.read_csv(infile, sep='\t', header=1)
    # keep only records with tilt_col
    data = df.copy()
    data = data[data[tilt_col].notnull()]
    data = data.where(data.notnull(), "")
    # turn into pmag data list
    data = list(data.T.apply(dict))
    # get orientation data
    if data_model_num == 3:
        # often orientation will be in infile (sites table)
        if os.path.split(orfile)[1] == os.path.split(infile)[1]:
            ordata = df[df[azkey].notnull()]
            ordata = ordata[ordata[dipkey].notnull()]
            ordata = list(ordata.T.apply(dict))
        # sometimes orientation might be in a sample file instead
        else:
            ordata = pd.read_csv(orfile, sep='\t', header=1)
            ordata = list(ordata.T.apply(dict))
    else:
        ordata, file_type = pmag.magic_read(orfile)
    if '-exc' in sys.argv:
        crits, file_type = pmag.magic_read(critfile)
        SiteCrits = []
        for crit in crits:
            if crit[crit_col] == "DE-SITE":
                SiteCrits.append(crit)
                # break

    # get to work
    PLTS = {'geo': 1, 'strat': 2, 'taus': 3}  # make plot dictionary
    if not set_env.IS_WIN:
        pmagplotlib.plot_init(PLTS['geo'], 5, 5)
        pmagplotlib.plot_init(PLTS['strat'], 5, 5)
        pmagplotlib.plot_init(PLTS['taus'], 5, 5)
    if data_model_num == 2:
        GEOrecs = pmag.get_dictitem(data, tilt_col, '0', 'T')
    else:
        GEOrecs = data
    if len(GEOrecs) > 0:  # have some geographic data
        num_dropped = 0
        DIDDs = []  # set up list for dec inc dip_direction, dip
        for rec in GEOrecs:  # parse data
            dip, dip_dir = 0, -1
            Dec = float(rec[dec_col])
            Inc = float(rec[inc_col])
            orecs = pmag.get_dictitem(ordata, site_col, rec[site_col], 'T')
            if len(orecs) > 0:
                if orecs[0][azkey] != "":
                    dip_dir = float(orecs[0][azkey])
                if orecs[0][dipkey] != "":
                    dip = float(orecs[0][dipkey])
            if dip != 0 and dip_dir != -1:
                if '-exc' in sys.argv:
                    keep = 1
                    for site_crit in SiteCrits:
                        crit_name = site_crit['table_column'].split('.')[1]
                        if crit_name and crit_name in rec.keys() and rec[crit_name]:
                            # get the correct operation (<, >=, =, etc.)
                            op = OPS[site_crit['criterion_operation']]
                            # then make sure the site record passes
                            if op(float(rec[crit_name]), float(site_crit['criterion_value'])):
                                keep = 0
                    if keep == 1:
                        DIDDs.append([Dec, Inc, dip_dir, dip])
                    else:
                        num_dropped += 1
                else:
                    DIDDs.append([Dec, Inc, dip_dir, dip])
        if num_dropped:
            print("-W- Dropped {} records because each failed one or more criteria".format(num_dropped))
    else:
        print('no geographic directional data found')
        sys.exit()

    pmagplotlib.plot_eq(PLTS['geo'], DIDDs, 'Geographic')
    data = np.array(DIDDs)
    D, I = pmag.dotilt_V(data)
    TCs = np.array([D, I]).transpose()
    pmagplotlib.plot_eq(PLTS['strat'], TCs, 'Stratigraphic')
    if plot == 0:
        pmagplotlib.draw_figs(PLTS)
    Percs = list(range(untilt_min, untilt_max))
    Cdf, Untilt = [], []
    plt.figure(num=PLTS['taus'])
    print('doing ', nboot, ' iterations...please be patient.....')
    for n in range(nboot):  # do bootstrap data sets - plot first 25 as dashed red line
        if n % 50 == 0:
            print(n)
        Taus = []  # set up lists for taus
        PDs = pmag.pseudo(DIDDs)
        if kappa != 0:
            for k in range(len(PDs)):
                d, i = pmag.fshdev(kappa)
                dipdir, dip = pmag.dodirot(d, i, PDs[k][2], PDs[k][3])
                PDs[k][2] = dipdir
                PDs[k][3] = dip
        for perc in Percs:
            tilt = np.array([1., 1., 1., 0.01*perc])
            D, I = pmag.dotilt_V(PDs*tilt)
            TCs = np.array([D, I]).transpose()
            ppars = pmag.doprinc(TCs)  # get principal directions
            Taus.append(ppars['tau1'])
        if n < 25:
            plt.plot(Percs, Taus, 'r--')
        # tilt that gives maximum tau
        Untilt.append(Percs[Taus.index(np.max(Taus))])
        Cdf.append(float(n) / float(nboot))
    plt.plot(Percs, Taus, 'k')
    plt.xlabel('% Untilting')
    plt.ylabel('tau_1 (red), CDF (green)')
    Untilt.sort()  # now for CDF of tilt of maximum tau
    plt.plot(Untilt, Cdf, 'g')
    lower = int(.025*nboot)
    upper = int(.975*nboot)
    plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--')
    plt.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--')
    tit = '%i - %i %s' % (Untilt[lower], Untilt[upper], 'Percent Unfolding')
    print(tit)
    plt.title(tit)
    if plot == 0:
        pmagplotlib.draw_figs(PLTS)
        ans = input('S[a]ve all figures, <Return> to quit \n ')
        if ans != 'a':
            print("Good bye")
            sys.exit()
    files = {}
    for key in list(PLTS.keys()):
        files[key] = ('foldtest_'+'%s' % (key.strip()[:2])+'.'+fmt)
    pmagplotlib.save_plots(PLTS, files)


if __name__ == "__main__":
    main()
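
# ---------------------------------------------------------------------------
# Illustrative additions (not part of foldtest_magic.py).
#
# Example invocations, using only the options documented in the docstring
# (file names are the documented defaults):
#   python foldtest_magic.py -f sites.txt -n 1000 -b -10 150 -sav
#   python foldtest_magic.py -DM 2 -f pmag_sites.txt -fsa er_samples.txt
#
# The statistic at the heart of the fold test, pulled out of main() as a
# standalone helper for a single (non-bootstrapped) data set.  DIDDs rows are
# [dec, inc, dip_direction, dip] exactly as built in main(); the optimum is
# the percent unfolding that maximizes tau_1.
def untilt_curve(DIDDs, percents=range(-10, 150)):
    """Return (percents, tau_1 at each percent unfolding)."""
    taus = []
    arr = np.array(DIDDs)
    for perc in percents:
        tilt = np.array([1., 1., 1., 0.01 * perc])
        D, I = pmag.dotilt_V(arr * tilt)
        ppars = pmag.doprinc(np.array([D, I]).transpose())
        taus.append(ppars['tau1'])
    return list(percents), taus

# Usage sketch:  percs, taus = untilt_curve(DIDDs)
#                best = percs[taus.index(max(taus))]
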
# coding=utf-8
import numpy as np
import reikna.cluda as cluda
from reikna.fft import FFT, FFTShift
import pyopencl.array as clarray
from pyopencl import clmath
from reikna.core import Computation, Transformation, Parameter, Annotation, Type
from reikna.algorithms import PureParallel
from matplotlib import cm
import time as t
import matplotlib.pyplot as plt
import statistic_functions4 as sf
#import mylog as Log

np.set_printoptions(threshold=np.inf)

batch = 100
N = 1024
api = cluda.any_api()
thr = api.Thread.create()
data = np.load('8psk_data.npy')
data = np.reshape(data, (batch*4, N))  # batch*4 = 400 blocks in total

t1 = t.clock()
data0 = data[0:batch, :].astype(np.complex128)
data_g = thr.to_device(data0)
print(t.clock()-t1)

# compile
fft = FFT(data_g, (0, 1))
fftc = fft.compile(thr)
data_f = thr.array(data0.shape, dtype=np.complex128)
shift = FFTShift(data_f, (0, 1))
shiftc = shift.compile(thr)
data_shift = thr.array(data0.shape, dtype=np.complex128)
sum = sf.stat(thr)
logg10 = sf.logg10(thr)


def myfft(data):
    '''
    input:
        data: cluda-Array (100, 1024)
    -----------------------------------------------
    output:
        TS_gpu: cluda-Array (1000, 1024)
    '''
    # FFT
    t_fft = t.clock()
    data_f = thr.array(data.shape, dtype=np.complex128)
    STAT_gpu = thr.array(data.shape, dtype=np.complex128)
    fftc(data_f, data)
    shiftc(STAT_gpu, data_f)
    # log
    t_log = t.clock()
    STAT_gpu = abs(STAT_gpu)
    logg10(STAT_gpu, global_size=(N, batch))
    # statistics / interpolation
    t_st = t.clock()
    TS_gpu = cluda.ocl.Array(thr, shape=(1000, N), dtype=np.int)
    sum(TS_gpu, STAT_gpu, global_size=(N, batch))
    print('fft: %f, log: %f, stat: %f' % (t_log-t_fft, t_st-t_log, t.clock()-t_st))
    print('total: %f' % (t.clock()-t_fft))
    return TS_gpu
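
# ---------------------------------------------------------------------------
# Illustrative addition (not part of the original script): a CPU reference for
# the FFT -> fftshift -> abs -> log10 stage of myfft(), handy for spot-checking
# the GPU output.  It mirrors the axes (0, 1) used by the compiled FFT above;
# the statistics/interpolation step done by statistic_functions4 is not
# reproduced here, and the small epsilon guarding log10(0) is an assumption.
def myfft_cpu_reference(block):
    """block: (batch, N) complex ndarray -> log10 |fftshift(fft2(block))|."""
    spec = np.fft.fftshift(np.fft.fft2(block), axes=(0, 1))
    return np.log10(np.abs(spec) + 1e-12)
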
from reikna.fft import FFT,", "dtype=np.complex128) shift = FFTShift(data_f, (0,1)) shiftc = shift.compile(thr) data_shift = thr.array(data0.shape, dtype=np.complex128) sum", "t2-t1 j = j + 1 i = i + 1 if j", "np import reikna.cluda as cluda from reikna.fft import FFT, FFTShift import pyopencl.array as", "= np.load('8psk_data.npy') data = np.reshape(data, (batch*4, N)) # 一共 batch*4 = 400次 t1", "fftc = fft.compile(thr) data_f = thr.array(data0.shape, dtype=np.complex128) shift = FFTShift(data_f, (0,1)) shiftc =", "fft.compile(thr) data_f = thr.array(data0.shape, dtype=np.complex128) shift = FFTShift(data_f, (0,1)) shiftc = shift.compile(thr) data_shift", "= t.clock() STAT_gpu = abs(STAT_gpu) logg10(STAT_gpu, global_size = (N, batch)) #统计,插值 t_st =", "j = j + 1 i = i + 1 if j ==", "thr.to_device(data0) print(t.clock()-t1) #compile fft = FFT(data_g, (0,1)) fftc = fft.compile(thr) data_f = thr.array(data0.shape,", "t import matplotlib.pyplot as plt import statistic_functions4 as sf #import mylog as Log", "t_log = t.clock() STAT_gpu = abs(STAT_gpu) logg10(STAT_gpu, global_size = (N, batch)) #统计,插值 t_st", "STAT_gpu, global_size = (N,batch)) print('fft: %f, log: %f, stat: %f'%(t_log-t_fft, t_st-t_log, t.clock()-t_st)) print('total:", "(N, batch)) #统计,插值 t_st = t.clock() TS_gpu = cluda.ocl.Array(thr, shape=(1000, N), dtype=np.int) sum(TS_gpu,", "as plt import statistic_functions4 as sf #import mylog as Log np.set_printoptions(threshold=np.inf) batch =", "thr.array(data.shape, dtype=np.complex128) STAT_gpu = thr.array(data.shape, dtype=np.complex128) fftc(data_f, data) shiftc(STAT_gpu, data_f) #log t_log =", "%f'%(t_log-t_fft, t_st-t_log, t.clock()-t_st)) print('total: %f'%(t.clock()-t_fft)) return TS_gpu i=0 j=0 fig=plt.figure() #fig, ax =", "# coding=utf-8 import numpy as np import reikna.cluda as cluda from reikna.fft import", "0 while i<100: t1 = t.clock() data0 = data[j:(j+1)*batch, :].astype(np.complex128) data_g = thr.to_device(data0)", "= 400次 t1 = t.clock() data0 = data[0:batch, :].astype(np.complex128) data_g = thr.to_device(data0) print(t.clock()-t1)", "data = np.reshape(data, (batch*4, N)) # 一共 batch*4 = 400次 t1 = t.clock()", "cmap = 'nipy_spectral') plt.ylim(0,1000) plt.pause(0.00000001) print('No. 
%d, transmission+compute: %f, plot: %f'%(i, t2-t1, t.clock()-t2))", "= thr.array(data.shape, dtype=np.complex128) fftc(data_f, data) shiftc(STAT_gpu, data_f) #log t_log = t.clock() STAT_gpu =", "import statistic_functions4 as sf #import mylog as Log np.set_printoptions(threshold=np.inf) batch = 100 N", "matplotlib.pyplot as plt import statistic_functions4 as sf #import mylog as Log np.set_printoptions(threshold=np.inf) batch", "myfft(data): ''' input: data: cluda-Array (100, 1024) ----------------------------------------------- output: TS_gpu: cluda-Array (1000, 1024)", "= (N, batch)) #统计,插值 t_st = t.clock() TS_gpu = cluda.ocl.Array(thr, shape=(1000, N), dtype=np.int)", "t.clock() data0 = data[0:batch, :].astype(np.complex128) data_g = thr.to_device(data0) print(t.clock()-t1) #compile fft = FFT(data_g,", "print('fft: %f, log: %f, stat: %f'%(t_log-t_fft, t_st-t_log, t.clock()-t_st)) print('total: %f'%(t.clock()-t_fft)) return TS_gpu i=0", "= cluda.any_api() thr = api.Thread.create() data = np.load('8psk_data.npy') data = np.reshape(data, (batch*4, N))", "TS_gpu = cluda.ocl.Array(thr, shape=(1000, N), dtype=np.int) sum(TS_gpu, STAT_gpu, global_size = (N,batch)) print('fft: %f,", "stat: %f'%(t_log-t_fft, t_st-t_log, t.clock()-t_st)) print('total: %f'%(t.clock()-t_fft)) return TS_gpu i=0 j=0 fig=plt.figure() #fig, ax", "i<100: t1 = t.clock() data0 = data[j:(j+1)*batch, :].astype(np.complex128) data_g = thr.to_device(data0) out =", "from pyopencl import clmath from reikna.core import Computation, Transformation, Parameter, Annotation, Type from", "= thr.to_device(data0) out = myfft(data_g) out = out.get() t2 = t.clock() #nipy_spectral plt.clf()", "= sf.logg10(thr) def myfft(data): ''' input: data: cluda-Array (100, 1024) ----------------------------------------------- output: TS_gpu:", "global_size = (N,batch)) print('fft: %f, log: %f, stat: %f'%(t_log-t_fft, t_st-t_log, t.clock()-t_st)) print('total: %f'%(t.clock()-t_fft))", "%f, plot: %f'%(i, t2-t1, t.clock()-t2)) summ = summ + t2-t1 j = j", "clmath from reikna.core import Computation, Transformation, Parameter, Annotation, Type from reikna.algorithms import PureParallel", "reikna.core import Computation, Transformation, Parameter, Annotation, Type from reikna.algorithms import PureParallel from matplotlib", "as Log np.set_printoptions(threshold=np.inf) batch = 100 N = 1024 api = cluda.any_api() thr", "transmission+compute: %f, plot: %f'%(i, t2-t1, t.clock()-t2)) summ = summ + t2-t1 j =", "out.get() t2 = t.clock() #nipy_spectral plt.clf() #plt.imshow(out, cmap = cm.hot) plt.imshow(out, cmap =", "%f, log: %f, stat: %f'%(t_log-t_fft, t_st-t_log, t.clock()-t_st)) print('total: %f'%(t.clock()-t_fft)) return TS_gpu i=0 j=0", "cm.hot) plt.imshow(out, cmap = 'nipy_spectral') plt.ylim(0,1000) plt.pause(0.00000001) print('No. 
%d, transmission+compute: %f, plot: %f'%(i,", "Computation, Transformation, Parameter, Annotation, Type from reikna.algorithms import PureParallel from matplotlib import cm", "api = cluda.any_api() thr = api.Thread.create() data = np.load('8psk_data.npy') data = np.reshape(data, (batch*4,", "plot: %f'%(i, t2-t1, t.clock()-t2)) summ = summ + t2-t1 j = j +", "np.load('8psk_data.npy') data = np.reshape(data, (batch*4, N)) # 一共 batch*4 = 400次 t1 =", "shift.compile(thr) data_shift = thr.array(data0.shape, dtype=np.complex128) sum = sf.stat(thr) logg10 = sf.logg10(thr) def myfft(data):", "#统计,插值 t_st = t.clock() TS_gpu = cluda.ocl.Array(thr, shape=(1000, N), dtype=np.int) sum(TS_gpu, STAT_gpu, global_size", "summ = summ + t2-t1 j = j + 1 i = i", "data[0:batch, :].astype(np.complex128) data_g = thr.to_device(data0) print(t.clock()-t1) #compile fft = FFT(data_g, (0,1)) fftc =", "= t.clock() data0 = data[j:(j+1)*batch, :].astype(np.complex128) data_g = thr.to_device(data0) out = myfft(data_g) out", "pyopencl import clmath from reikna.core import Computation, Transformation, Parameter, Annotation, Type from reikna.algorithms", "%f'%(i, t2-t1, t.clock()-t2)) summ = summ + t2-t1 j = j + 1", "1024 api = cluda.any_api() thr = api.Thread.create() data = np.load('8psk_data.npy') data = np.reshape(data,", "t_fft = t.clock() data_f = thr.array(data.shape, dtype=np.complex128) STAT_gpu = thr.array(data.shape, dtype=np.complex128) fftc(data_f, data)", "FFT, FFTShift import pyopencl.array as clarray from pyopencl import clmath from reikna.core import", "plt import statistic_functions4 as sf #import mylog as Log np.set_printoptions(threshold=np.inf) batch = 100", "= thr.array(data.shape, dtype=np.complex128) STAT_gpu = thr.array(data.shape, dtype=np.complex128) fftc(data_f, data) shiftc(STAT_gpu, data_f) #log t_log", "#FFT t_fft = t.clock() data_f = thr.array(data.shape, dtype=np.complex128) STAT_gpu = thr.array(data.shape, dtype=np.complex128) fftc(data_f,", "TS_gpu i=0 j=0 fig=plt.figure() #fig, ax = plt.subplots() summ = 0 while i<100:", "plt.imshow(out, cmap = 'nipy_spectral') plt.ylim(0,1000) plt.pause(0.00000001) print('No. %d, transmission+compute: %f, plot: %f'%(i, t2-t1,", "cluda.any_api() thr = api.Thread.create() data = np.load('8psk_data.npy') data = np.reshape(data, (batch*4, N)) #", "'nipy_spectral') plt.ylim(0,1000) plt.pause(0.00000001) print('No. 
%d, transmission+compute: %f, plot: %f'%(i, t2-t1, t.clock()-t2)) summ =", ":].astype(np.complex128) data_g = thr.to_device(data0) out = myfft(data_g) out = out.get() t2 = t.clock()", "logg10 = sf.logg10(thr) def myfft(data): ''' input: data: cluda-Array (100, 1024) ----------------------------------------------- output:", "batch = 100 N = 1024 api = cluda.any_api() thr = api.Thread.create() data", "= FFT(data_g, (0,1)) fftc = fft.compile(thr) data_f = thr.array(data0.shape, dtype=np.complex128) shift = FFTShift(data_f,", "= FFTShift(data_f, (0,1)) shiftc = shift.compile(thr) data_shift = thr.array(data0.shape, dtype=np.complex128) sum = sf.stat(thr)", "dtype=np.int) sum(TS_gpu, STAT_gpu, global_size = (N,batch)) print('fft: %f, log: %f, stat: %f'%(t_log-t_fft, t_st-t_log,", "input: data: cluda-Array (100, 1024) ----------------------------------------------- output: TS_gpu: cluda-Array (1000, 1024) ''' #FFT", "abs(STAT_gpu) logg10(STAT_gpu, global_size = (N, batch)) #统计,插值 t_st = t.clock() TS_gpu = cluda.ocl.Array(thr,", "sf #import mylog as Log np.set_printoptions(threshold=np.inf) batch = 100 N = 1024 api", "reikna.fft import FFT, FFTShift import pyopencl.array as clarray from pyopencl import clmath from", "summ = 0 while i<100: t1 = t.clock() data0 = data[j:(j+1)*batch, :].astype(np.complex128) data_g", "1 i = i + 1 if j == 4: j=0 print('avg compute:", "import reikna.cluda as cluda from reikna.fft import FFT, FFTShift import pyopencl.array as clarray", "= out.get() t2 = t.clock() #nipy_spectral plt.clf() #plt.imshow(out, cmap = cm.hot) plt.imshow(out, cmap", "data_g = thr.to_device(data0) out = myfft(data_g) out = out.get() t2 = t.clock() #nipy_spectral", "PureParallel from matplotlib import cm import time as t import matplotlib.pyplot as plt", "Parameter, Annotation, Type from reikna.algorithms import PureParallel from matplotlib import cm import time", "api.Thread.create() data = np.load('8psk_data.npy') data = np.reshape(data, (batch*4, N)) # 一共 batch*4 =", "as sf #import mylog as Log np.set_printoptions(threshold=np.inf) batch = 100 N = 1024", "thr.array(data0.shape, dtype=np.complex128) shift = FFTShift(data_f, (0,1)) shiftc = shift.compile(thr) data_shift = thr.array(data0.shape, dtype=np.complex128)", "#nipy_spectral plt.clf() #plt.imshow(out, cmap = cm.hot) plt.imshow(out, cmap = 'nipy_spectral') plt.ylim(0,1000) plt.pause(0.00000001) print('No.", "fftc(data_f, data) shiftc(STAT_gpu, data_f) #log t_log = t.clock() STAT_gpu = abs(STAT_gpu) logg10(STAT_gpu, global_size", "dtype=np.complex128) STAT_gpu = thr.array(data.shape, dtype=np.complex128) fftc(data_f, data) shiftc(STAT_gpu, data_f) #log t_log = t.clock()", "j=0 fig=plt.figure() #fig, ax = plt.subplots() summ = 0 while i<100: t1 =", "fig=plt.figure() #fig, ax = plt.subplots() summ = 0 while i<100: t1 = t.clock()", "t1 = t.clock() data0 = data[0:batch, :].astype(np.complex128) data_g = thr.to_device(data0) print(t.clock()-t1) #compile fft", "cluda from reikna.fft import FFT, FFTShift import pyopencl.array as clarray from pyopencl import", "plt.clf() #plt.imshow(out, cmap = cm.hot) plt.imshow(out, cmap = 'nipy_spectral') plt.ylim(0,1000) plt.pause(0.00000001) print('No. 
%d,", "''' input: data: cluda-Array (100, 1024) ----------------------------------------------- output: TS_gpu: cluda-Array (1000, 1024) '''", "import FFT, FFTShift import pyopencl.array as clarray from pyopencl import clmath from reikna.core", "logg10(STAT_gpu, global_size = (N, batch)) #统计,插值 t_st = t.clock() TS_gpu = cluda.ocl.Array(thr, shape=(1000,", "i = i + 1 if j == 4: j=0 print('avg compute: %f'%(summ/100))", "= thr.to_device(data0) print(t.clock()-t1) #compile fft = FFT(data_g, (0,1)) fftc = fft.compile(thr) data_f =", "thr.to_device(data0) out = myfft(data_g) out = out.get() t2 = t.clock() #nipy_spectral plt.clf() #plt.imshow(out,", "= 100 N = 1024 api = cluda.any_api() thr = api.Thread.create() data =", "thr.array(data0.shape, dtype=np.complex128) sum = sf.stat(thr) logg10 = sf.logg10(thr) def myfft(data): ''' input: data:", "data[j:(j+1)*batch, :].astype(np.complex128) data_g = thr.to_device(data0) out = myfft(data_g) out = out.get() t2 =", "j + 1 i = i + 1 if j == 4: j=0", "%f'%(t.clock()-t_fft)) return TS_gpu i=0 j=0 fig=plt.figure() #fig, ax = plt.subplots() summ = 0", "N = 1024 api = cluda.any_api() thr = api.Thread.create() data = np.load('8psk_data.npy') data", "= thr.array(data0.shape, dtype=np.complex128) sum = sf.stat(thr) logg10 = sf.logg10(thr) def myfft(data): ''' input:", "1024) ----------------------------------------------- output: TS_gpu: cluda-Array (1000, 1024) ''' #FFT t_fft = t.clock() data_f", "(0,1)) shiftc = shift.compile(thr) data_shift = thr.array(data0.shape, dtype=np.complex128) sum = sf.stat(thr) logg10 =", "shiftc = shift.compile(thr) data_shift = thr.array(data0.shape, dtype=np.complex128) sum = sf.stat(thr) logg10 = sf.logg10(thr)", "t.clock() STAT_gpu = abs(STAT_gpu) logg10(STAT_gpu, global_size = (N, batch)) #统计,插值 t_st = t.clock()", "return TS_gpu i=0 j=0 fig=plt.figure() #fig, ax = plt.subplots() summ = 0 while", "t1 = t.clock() data0 = data[j:(j+1)*batch, :].astype(np.complex128) data_g = thr.to_device(data0) out = myfft(data_g)", "import clmath from reikna.core import Computation, Transformation, Parameter, Annotation, Type from reikna.algorithms import", "sum(TS_gpu, STAT_gpu, global_size = (N,batch)) print('fft: %f, log: %f, stat: %f'%(t_log-t_fft, t_st-t_log, t.clock()-t_st))", "= cm.hot) plt.imshow(out, cmap = 'nipy_spectral') plt.ylim(0,1000) plt.pause(0.00000001) print('No. %d, transmission+compute: %f, plot:", "t_st-t_log, t.clock()-t_st)) print('total: %f'%(t.clock()-t_fft)) return TS_gpu i=0 j=0 fig=plt.figure() #fig, ax = plt.subplots()", "t.clock() data_f = thr.array(data.shape, dtype=np.complex128) STAT_gpu = thr.array(data.shape, dtype=np.complex128) fftc(data_f, data) shiftc(STAT_gpu, data_f)", "= 'nipy_spectral') plt.ylim(0,1000) plt.pause(0.00000001) print('No. %d, transmission+compute: %f, plot: %f'%(i, t2-t1, t.clock()-t2)) summ", "Log np.set_printoptions(threshold=np.inf) batch = 100 N = 1024 api = cluda.any_api() thr =", "(N,batch)) print('fft: %f, log: %f, stat: %f'%(t_log-t_fft, t_st-t_log, t.clock()-t_st)) print('total: %f'%(t.clock()-t_fft)) return TS_gpu", "#plt.imshow(out, cmap = cm.hot) plt.imshow(out, cmap = 'nipy_spectral') plt.ylim(0,1000) plt.pause(0.00000001) print('No. 
%d, transmission+compute:", "#import mylog as Log np.set_printoptions(threshold=np.inf) batch = 100 N = 1024 api =", "as cluda from reikna.fft import FFT, FFTShift import pyopencl.array as clarray from pyopencl", "out = myfft(data_g) out = out.get() t2 = t.clock() #nipy_spectral plt.clf() #plt.imshow(out, cmap", "import numpy as np import reikna.cluda as cluda from reikna.fft import FFT, FFTShift", "numpy as np import reikna.cluda as cluda from reikna.fft import FFT, FFTShift import", "import PureParallel from matplotlib import cm import time as t import matplotlib.pyplot as", "= t.clock() #nipy_spectral plt.clf() #plt.imshow(out, cmap = cm.hot) plt.imshow(out, cmap = 'nipy_spectral') plt.ylim(0,1000)", "cluda.ocl.Array(thr, shape=(1000, N), dtype=np.int) sum(TS_gpu, STAT_gpu, global_size = (N,batch)) print('fft: %f, log: %f,", "sum = sf.stat(thr) logg10 = sf.logg10(thr) def myfft(data): ''' input: data: cluda-Array (100,", "Transformation, Parameter, Annotation, Type from reikna.algorithms import PureParallel from matplotlib import cm import", "np.set_printoptions(threshold=np.inf) batch = 100 N = 1024 api = cluda.any_api() thr = api.Thread.create()", "myfft(data_g) out = out.get() t2 = t.clock() #nipy_spectral plt.clf() #plt.imshow(out, cmap = cm.hot)", "while i<100: t1 = t.clock() data0 = data[j:(j+1)*batch, :].astype(np.complex128) data_g = thr.to_device(data0) out", "reikna.algorithms import PureParallel from matplotlib import cm import time as t import matplotlib.pyplot", "data: cluda-Array (100, 1024) ----------------------------------------------- output: TS_gpu: cluda-Array (1000, 1024) ''' #FFT t_fft", "import time as t import matplotlib.pyplot as plt import statistic_functions4 as sf #import", "= data[0:batch, :].astype(np.complex128) data_g = thr.to_device(data0) print(t.clock()-t1) #compile fft = FFT(data_g, (0,1)) fftc", "1024) ''' #FFT t_fft = t.clock() data_f = thr.array(data.shape, dtype=np.complex128) STAT_gpu = thr.array(data.shape,", "print(t.clock()-t1) #compile fft = FFT(data_g, (0,1)) fftc = fft.compile(thr) data_f = thr.array(data0.shape, dtype=np.complex128)", "#log t_log = t.clock() STAT_gpu = abs(STAT_gpu) logg10(STAT_gpu, global_size = (N, batch)) #统计,插值" ]
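
One portability note on the script above: time.clock() was removed in Python 3.8, so on a current interpreter every t.clock() call raises AttributeError. A minimal shim, run before the timing calls, restores them with time.perf_counter(), which matches the monotonic, high-resolution intent:

import time as t

# time.clock() disappeared in Python 3.8; alias it to perf_counter()
# so the t.clock() timing calls above keep working unchanged on
# modern interpreters.
if not hasattr(t, "clock"):
    t.clock = t.perf_counter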
from kapteyn import maputils
import numpy
from service import *   # provides plt, figsize, plotbox, epsilon, polrange, doplot, markerpos

fignum = 25
fig = plt.figure(figsize=figsize)
frame = fig.add_axes(plotbox)
title = r"Polyconic projection (PCO). (Cal. fig.29)"
header = {'NAXIS'  : 2, 'NAXIS1': 100, 'NAXIS2': 80,
          'CTYPE1' : 'RA---PCO',
          'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -5.0,
          'CTYPE2' : 'DEC--PCO',
          'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 5.0
         }
X = polrange()
Y = numpy.arange(-75, 90, 15.0)
# !!!!!! Let the world coordinates for constant latitude run from 180,180
# instead of 0,360. Then one prevents the connection between the two points
# 179.9999 and 180.0001, which is a jump, but smaller than the definition of
# a rejected jump in the wcsgrat module.
# Also we need to increase the value of 'gridsamples' to
# increase the relative size of a jump.
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum=(1, 2), wylim=(-90, 90.0), wxlim=(-180, 180),
                       startx=X, starty=Y, gridsamples=2000)
grat.setp_lineswcs0(0, lw=2)
grat.setp_lineswcs1(0, lw=2)
# Remove the left 180 deg and print the right 180 deg instead
w1 = numpy.arange(0, 151, 30.0)
w2 = numpy.arange(180, 360, 30.0)
w2[0] = 180 + epsilon
lon_world = numpy.concatenate((w1, w2))
lat_world = [-60, -30, 30, 60]
labkwargs0 = {'color': 'r', 'va': 'bottom', 'ha': 'right'}
labkwargs1 = {'color': 'b', 'va': 'bottom', 'ha': 'right'}
doplot(frame, fignum, annim, grat, title,
       lon_world=lon_world, lat_world=lat_world,
       labkwargs0=labkwargs0, labkwargs1=labkwargs1, markerpos=markerpos)
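
A note on the gridsamples=2000 choice above: at the closing meridian, consecutive samples land on opposite edges of the projected map, and wcsgrat only breaks the line when that pixel jump is large compared to an ordinary sampling step. Raising the sample count shrinks the ordinary step, so the wrap jump becomes easy to reject. The sketch below imitates that relative-jump test in plain numpy; the factor-of-ten threshold and the toy wrap are illustrative assumptions, not wcsgrat's actual criterion:

import numpy as np

def split_on_jumps(x, factor=10.0):
    # Break a sampled line wherever a step exceeds `factor` times the
    # median step -- a simplified stand-in for wcsgrat's jump rejection.
    steps = np.abs(np.diff(x))
    breaks = np.where(steps > factor * np.median(steps))[0]
    return np.split(x, breaks + 1)

for n in (8, 2000):
    lon = np.linspace(-180.0, 180.0, n)
    x_pix = lon % 360.0        # toy projection with a wrap at the 0/360 seam
    print(n, [len(s) for s in split_on_jumps(x_pix)])
# With 8 samples the wrap jump is only ~6x a normal step and survives;
# with 2000 samples the same jump dwarfs the step and the line is split.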
import django_filters

from .models import QuestionComment, ExamComment


class QuestionCommentFilter(django_filters.FilterSet):
    is_resolved = django_filters.BooleanFilter()

    class Meta:
        model = QuestionComment
        fields = ('is_resolved',)


class ExamCommentFilter(django_filters.FilterSet):
    is_resolved = django_filters.BooleanFilter()

    class Meta:
        model = ExamComment
        fields = ('is_resolved',)
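
For context, a FilterSet like these is applied by instantiating it with the request's query parameters and a base queryset, then reading .qs. A minimal sketch of a function view doing that; the view and module path are illustrative, not part of the original app:

from django.http import JsonResponse

from .filters import QuestionCommentFilter   # assumed module path
from .models import QuestionComment

def question_comments(request):
    # ?is_resolved=true (or false) narrows the queryset; omitting the
    # parameter leaves it untouched, per BooleanFilter semantics.
    f = QuestionCommentFilter(request.GET, queryset=QuestionComment.objects.all())
    return JsonResponse({'ids': list(f.qs.values_list('pk', flat=True))})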
#!/usr/bin/env python3
# From the cnheider/botorch fork: re-exports the synthetic test functions.

from .branin import neg_branin
from .eggholder import neg_eggholder
from .hartmann6 import neg_hartmann6
from .holder_table import neg_holder_table
from .michalewicz import neg_michalewicz
from .styblinski_tang import neg_styblinski_tang


__all__ = [
    "neg_branin",
    "neg_eggholder",
    "neg_hartmann6",
    "neg_holder_table",
    "neg_michalewicz",
    "neg_styblinski_tang",
]
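
The module above is a pure re-export shim: it flattens one-function-per-file submodules into a single import surface, and __all__ pins exactly what a star-import exposes. A generic illustration of that mechanism (standard Python semantics, nothing botorch-specific):

# demo.py -- only names listed in __all__ survive `from demo import *`.
__all__ = ["public_fn"]

def public_fn():
    return "exported"

def _helper():
    # Not in __all__ (and underscore-prefixed), so a star-import skips it.
    return "hidden"

# elsewhere:
#   from demo import *
#   public_fn()   # available
#   _helper()     # NameError: not exported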
from bot.db.entities.Homework import Homework


class HomeworkManager:
    """Data-access layer for the Homework table."""

    def __init__(self, db):
        self._db = db

    def getAll(self):
        # Column 0 is the row id; columns 1..10 map onto the Homework fields.
        cur = self._db.cursor()
        cur.execute('SELECT * FROM Homework')
        homeworks = []
        for homework in cur.fetchall():
            homeworks.append(Homework(homework[1], homework[2], homework[3],
                                      homework[4], homework[5], homework[6],
                                      homework[7], homework[8], homework[9],
                                      homework[10]))
        return homeworks

    def insert(self, homework):
        # Parameterized query; the adjacent string literals are concatenated
        # by Python into a single SQL statement.
        cur = self._db.cursor()
        cur.execute('INSERT INTO Homework (date, matiere, codeMatiere, aFaire, idDevoir'
                    ', documentsAFaire, donneLe, effectue, interrogation, rendreEnLigne'
                    ') values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
                    (homework.getDate(), homework.getMatiere(), homework.getCodeMatiere(),
                     homework.getAFaire(), homework.getIdDevoir(), homework.getDocumentAFaire(),
                     homework.getDonneLe(), homework.getEffectue(), homework.getInterrogation(),
                     homework.getRendreEnLigne(),))
        self._db.commit()
[ "'.json', 'w') reader = csv.reader(open(files, 'rU')) fieldnames = () out = 0 for", "'rU')) fieldnames = () out = 0 for row in reader: temp =", "+= ': \"' temp += a temp += '\",' nameCounter += 1 itemCounter", "0 for row in reader: temp = [] # print row fieldnames =", "temp = temp[:-1] temp += \"}\" # print temp out += temp out", "'\"' temp += ': \"' temp += a temp += '\",' nameCounter +=", "temp += '\",' nameCounter += 1 itemCounter += 1 temp = temp[:-1] temp", "# print row itemCounter = 0 temp = \"{\" for item in row:", "csvfile = open(files, 'r') jsonfile = open(files[:-4] + '.json', 'w') reader = csv.reader(open(files,", "print row fieldnames = row break counter = 0 out = \"\" for", "temp out += temp out += \", \" counter += 1 out =", "json import glob for files in glob.glob(\"*.csv\"): csvfile = open(files, 'r') jsonfile =", "itemCounter == nameCounter: # print item temp += ' \"' temp += item", "skip print \"0\" else: # print row itemCounter = 0 temp = \"{\"", "= item # print a nameCounter = 0 for item in fieldnames: if", "in glob.glob(\"*.csv\"): csvfile = open(files, 'r') jsonfile = open(files[:-4] + '.json', 'w') reader", "item temp += '\"' temp += ': \"' temp += a temp +=", "0 for item in fieldnames: if itemCounter == nameCounter: # print item temp", "out = \"\" for row in reader: if counter == 0: # skip", "glob for files in glob.glob(\"*.csv\"): csvfile = open(files, 'r') jsonfile = open(files[:-4] +", "temp += ' \"' temp += item temp += '\"' temp += ':", "else: # print row itemCounter = 0 temp = \"{\" for item in", "+= ' \"' temp += item temp += '\"' temp += ': \"'", "that converts CSV to JSON for my MongoDB project. # - It converts", "0 out = \"\" for row in reader: if counter == 0: #", "+= item temp += '\"' temp += ': \"' temp += a temp", "= 0 out = \"\" for row in reader: if counter == 0:", "import json import glob for files in glob.glob(\"*.csv\"): csvfile = open(files, 'r') jsonfile", "converts all the csv files under the directory at once. import csv import", "0 temp = \"{\" for item in row: a = item # print", "out = 0 for row in reader: temp = [] # print row", "'\",' nameCounter += 1 itemCounter += 1 temp = temp[:-1] temp += \"}\"", "nameCounter: # print item temp += ' \"' temp += item temp +=", "nameCounter = 0 for item in fieldnames: if itemCounter == nameCounter: # print", "Python program that converts CSV to JSON for my MongoDB project. # -", "JSON for my MongoDB project. # - It converts all the csv files", "at once. import csv import json import glob for files in glob.glob(\"*.csv\"): csvfile", "\"' temp += item temp += '\"' temp += ': \"' temp +=", "\"' temp += a temp += '\",' nameCounter += 1 itemCounter += 1", "row break counter = 0 out = \"\" for row in reader: if", "+= \", \" counter += 1 out = out[:-1] jsonfile.write(out) print \"End of", "== 0: # skip print \"0\" else: # print row itemCounter = 0", "csv files under the directory at once. import csv import json import glob", "for files in glob.glob(\"*.csv\"): csvfile = open(files, 'r') jsonfile = open(files[:-4] + '.json',", "+ '.json', 'w') reader = csv.reader(open(files, 'rU')) fieldnames = () out = 0", "\"0\" else: # print row itemCounter = 0 temp = \"{\" for item", "temp += '\"' temp += ': \"' temp += a temp += '\",'", "- It converts all the csv files under the directory at once. 
import", "temp += a temp += '\",' nameCounter += 1 itemCounter += 1 temp", "a nameCounter = 0 for item in fieldnames: if itemCounter == nameCounter: #", "print item temp += ' \"' temp += item temp += '\"' temp", "== nameCounter: # print item temp += ' \"' temp += item temp", "+= 1 temp = temp[:-1] temp += \"}\" # print temp out +=", "reader: if counter == 0: # skip print \"0\" else: # print row", "glob.glob(\"*.csv\"): csvfile = open(files, 'r') jsonfile = open(files[:-4] + '.json', 'w') reader =", "temp = \"{\" for item in row: a = item # print a", "open(files, 'r') jsonfile = open(files[:-4] + '.json', 'w') reader = csv.reader(open(files, 'rU')) fieldnames", "for my MongoDB project. # - It converts all the csv files under", "'r') jsonfile = open(files[:-4] + '.json', 'w') reader = csv.reader(open(files, 'rU')) fieldnames =", "= open(files, 'r') jsonfile = open(files[:-4] + '.json', 'w') reader = csv.reader(open(files, 'rU'))", "= csv.reader(open(files, 'rU')) fieldnames = () out = 0 for row in reader:", "+= '\"' temp += ': \"' temp += a temp += '\",' nameCounter", "once. import csv import json import glob for files in glob.glob(\"*.csv\"): csvfile =", "jsonfile = open(files[:-4] + '.json', 'w') reader = csv.reader(open(files, 'rU')) fieldnames = ()", "# Simple Python program that converts CSV to JSON for my MongoDB project.", "reader = csv.reader(open(files, 'rU')) fieldnames = () out = 0 for row in", "+= 1 itemCounter += 1 temp = temp[:-1] temp += \"}\" # print", "\"}\" # print temp out += temp out += \", \" counter +=", "in reader: temp = [] # print row fieldnames = row break counter", "reader: temp = [] # print row fieldnames = row break counter =", "1 temp = temp[:-1] temp += \"}\" # print temp out += temp", "out += temp out += \", \" counter += 1 out = out[:-1]", "nameCounter += 1 itemCounter += 1 temp = temp[:-1] temp += \"}\" #", "fieldnames: if itemCounter == nameCounter: # print item temp += ' \"' temp", "= \"\" for row in reader: if counter == 0: # skip print", "a = item # print a nameCounter = 0 for item in fieldnames:", "CSV to JSON for my MongoDB project. # - It converts all the", "itemCounter = 0 temp = \"{\" for item in row: a = item", "item # print a nameCounter = 0 for item in fieldnames: if itemCounter", "\"{\" for item in row: a = item # print a nameCounter =", "print temp out += temp out += \", \" counter += 1 out", "for item in row: a = item # print a nameCounter = 0", "print \"0\" else: # print row itemCounter = 0 temp = \"{\" for", "= 0 for row in reader: temp = [] # print row fieldnames", "import glob for files in glob.glob(\"*.csv\"): csvfile = open(files, 'r') jsonfile = open(files[:-4]", "= () out = 0 for row in reader: temp = [] #", "() out = 0 for row in reader: temp = [] # print", "row in reader: temp = [] # print row fieldnames = row break", "temp = [] # print row fieldnames = row break counter = 0", "= \"{\" for item in row: a = item # print a nameCounter", "+= '\",' nameCounter += 1 itemCounter += 1 temp = temp[:-1] temp +=", "Simple Python program that converts CSV to JSON for my MongoDB project. #", "temp += item temp += '\"' temp += ': \"' temp += a", "print row itemCounter = 0 temp = \"{\" for item in row: a", "under the directory at once. import csv import json import glob for files", "a temp += '\",' nameCounter += 1 itemCounter += 1 temp = temp[:-1]", "# - It converts all the csv files under the directory at once.", "files under the directory at once. 
import csv import json import glob for", "itemCounter += 1 temp = temp[:-1] temp += \"}\" # print temp out", "for row in reader: if counter == 0: # skip print \"0\" else:", "[] # print row fieldnames = row break counter = 0 out =", "= temp[:-1] temp += \"}\" # print temp out += temp out +=", "counter = 0 out = \"\" for row in reader: if counter ==", "0: # skip print \"0\" else: # print row itemCounter = 0 temp", "= 0 temp = \"{\" for item in row: a = item #", "row: a = item # print a nameCounter = 0 for item in", "' \"' temp += item temp += '\"' temp += ': \"' temp", "+= \"}\" # print temp out += temp out += \", \" counter", "+= temp out += \", \" counter += 1 out = out[:-1] jsonfile.write(out)", "for row in reader: temp = [] # print row fieldnames = row", "= [] # print row fieldnames = row break counter = 0 out", "\", \" counter += 1 out = out[:-1] jsonfile.write(out) print \"End of Execution\"", "1 itemCounter += 1 temp = temp[:-1] temp += \"}\" # print temp", "all the csv files under the directory at once. import csv import json", "# print item temp += ' \"' temp += item temp += '\"'", "+= a temp += '\",' nameCounter += 1 itemCounter += 1 temp =", "'w') reader = csv.reader(open(files, 'rU')) fieldnames = () out = 0 for row", "files in glob.glob(\"*.csv\"): csvfile = open(files, 'r') jsonfile = open(files[:-4] + '.json', 'w')", "break counter = 0 out = \"\" for row in reader: if counter", "= 0 for item in fieldnames: if itemCounter == nameCounter: # print item", "in reader: if counter == 0: # skip print \"0\" else: # print", "MongoDB project. # - It converts all the csv files under the directory", "project. # - It converts all the csv files under the directory at", "temp += ': \"' temp += a temp += '\",' nameCounter += 1", "converts CSV to JSON for my MongoDB project. # - It converts all", "the csv files under the directory at once. import csv import json import", "temp out += \", \" counter += 1 out = out[:-1] jsonfile.write(out) print", "fieldnames = () out = 0 for row in reader: temp = []", "= open(files[:-4] + '.json', 'w') reader = csv.reader(open(files, 'rU')) fieldnames = () out", "item in fieldnames: if itemCounter == nameCounter: # print item temp += '", "csv.reader(open(files, 'rU')) fieldnames = () out = 0 for row in reader: temp", "row itemCounter = 0 temp = \"{\" for item in row: a =", "to JSON for my MongoDB project. # - It converts all the csv", "directory at once. import csv import json import glob for files in glob.glob(\"*.csv\"):", "fieldnames = row break counter = 0 out = \"\" for row in", "# print a nameCounter = 0 for item in fieldnames: if itemCounter ==", "\"\" for row in reader: if counter == 0: # skip print \"0\"", "row in reader: if counter == 0: # skip print \"0\" else: #", "print a nameCounter = 0 for item in fieldnames: if itemCounter == nameCounter:", "open(files[:-4] + '.json', 'w') reader = csv.reader(open(files, 'rU')) fieldnames = () out =", "': \"' temp += a temp += '\",' nameCounter += 1 itemCounter +=", "the directory at once. import csv import json import glob for files in", "# print temp out += temp out += \", \" counter += 1", "<gh_stars>1-10 # Simple Python program that converts CSV to JSON for my MongoDB", "program that converts CSV to JSON for my MongoDB project. # - It", "temp[:-1] temp += \"}\" # print temp out += temp out += \",", "my MongoDB project. 
# - It converts all the csv files under the", "if counter == 0: # skip print \"0\" else: # print row itemCounter", "in row: a = item # print a nameCounter = 0 for item", "in fieldnames: if itemCounter == nameCounter: # print item temp += ' \"'", "if itemCounter == nameCounter: # print item temp += ' \"' temp +=", "It converts all the csv files under the directory at once. import csv", "temp += \"}\" # print temp out += temp out += \", \"", "row fieldnames = row break counter = 0 out = \"\" for row", "= row break counter = 0 out = \"\" for row in reader:", "csv import json import glob for files in glob.glob(\"*.csv\"): csvfile = open(files, 'r')", "counter == 0: # skip print \"0\" else: # print row itemCounter =", "item temp += ' \"' temp += item temp += '\"' temp +=", "out += \", \" counter += 1 out = out[:-1] jsonfile.write(out) print \"End", "import csv import json import glob for files in glob.glob(\"*.csv\"): csvfile = open(files,", "for item in fieldnames: if itemCounter == nameCounter: # print item temp +=", "# print row fieldnames = row break counter = 0 out = \"\"", "item in row: a = item # print a nameCounter = 0 for", "# skip print \"0\" else: # print row itemCounter = 0 temp =" ]
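For contrast, the same conversion can lean on the standard library end to end. The following is only a minimal sketch (an illustration, not the script above) using csv.DictReader and json.dump, which also escapes quotes and commas inside field values correctly — the string-concatenation version does not:

import csv
import glob
import json

for path in glob.glob("*.csv"):
    # DictReader pairs each row with the header automatically
    with open(path, newline='') as csvfile:
        rows = list(csv.DictReader(csvfile))
    # json.dump handles quoting/escaping of every field value
    with open(path[:-4] + '.json', 'w') as jsonfile:
        json.dump(rows, jsonfile)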
[ "def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.encrypt(' '), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.encrypt('abc'), 'zyx') def test_string_wrap_around(self): self.assertMultiLineEqual(", "Hence the test is skipped here class AtbashCipherDecryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.decrypt(''), '') def", "cipher class class AtbashCipherEncryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.encrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.encrypt(' '), '", "'') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.encrypt(' '), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.encrypt('abc'), 'zyx') def test_string_wrap_around(self):", "than 0, JavaScript frontend is tasked with validating #that key must be >=", "test.decrypt('def'), 'wvu') def test_multi_word(self): self.assertMultiLineEqual(test.decrypt('zyx def'), 'abc wvu') if __name__ == '__main__': unittest.main()", "def test_string_wrap_around(self): self.assertMultiLineEqual( test.encrypt('wvu'), 'def') def test_multi_word(self): self.assertMultiLineEqual(test.encrypt('abc wvu'), 'zyx def') #for values", "class AtbashCipherDecryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.decrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.decrypt(' '), ' ') def", "test_empty_string(self): self.assertMultiLineEqual(test.encrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.encrypt(' '), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.encrypt('abc'), 'zyx')", "test caesar cipher class class AtbashCipherEncryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.encrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.encrypt('", "wvu'), 'zyx def') #for values of key less than 0, JavaScript frontend is", "of key less than 0, JavaScript frontend is tasked with validating #that key", "self.assertMultiLineEqual( test.encrypt('wvu'), 'def') def test_multi_word(self): self.assertMultiLineEqual(test.encrypt('abc wvu'), 'zyx def') #for values of key", "self.assertMultiLineEqual(test.encrypt('abc'), 'zyx') def test_string_wrap_around(self): self.assertMultiLineEqual( test.encrypt('wvu'), 'def') def test_multi_word(self): self.assertMultiLineEqual(test.encrypt('abc wvu'), 'zyx def')", "def') #for values of key less than 0, JavaScript frontend is tasked with", "= AtbashCipher() #instantiate test caesar cipher class class AtbashCipherEncryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.encrypt(''), '')", "must be >= 0. 
Hence the test is skipped here class AtbashCipherDecryptTests(unittest.TestCase): def", "test_string_no_wrap_around(self): self.assertMultiLineEqual(test.decrypt('zyx'), 'abc') def test_string_wrap_around(self): self.assertMultiLineEqual( test.decrypt('def'), 'wvu') def test_multi_word(self): self.assertMultiLineEqual(test.decrypt('zyx def'), 'abc", "test_string_no_wrap_around(self): self.assertMultiLineEqual(test.encrypt('abc'), 'zyx') def test_string_wrap_around(self): self.assertMultiLineEqual( test.encrypt('wvu'), 'def') def test_multi_word(self): self.assertMultiLineEqual(test.encrypt('abc wvu'), 'zyx", "test_string_wrap_around(self): self.assertMultiLineEqual( test.decrypt('def'), 'wvu') def test_multi_word(self): self.assertMultiLineEqual(test.decrypt('zyx def'), 'abc wvu') if __name__ ==", "') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.encrypt('abc'), 'zyx') def test_string_wrap_around(self): self.assertMultiLineEqual( test.encrypt('wvu'), 'def') def test_multi_word(self): self.assertMultiLineEqual(test.encrypt('abc", "0, JavaScript frontend is tasked with validating #that key must be >= 0.", "tasked with validating #that key must be >= 0. Hence the test is", "here class AtbashCipherDecryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.decrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.decrypt(' '), ' ')", "def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.decrypt(' '), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.decrypt('zyx'), 'abc') def test_string_wrap_around(self): self.assertMultiLineEqual(", "less than 0, JavaScript frontend is tasked with validating #that key must be", "AtbashCipherEncryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.encrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.encrypt(' '), ' ') def test_string_no_wrap_around(self):", "JavaScript frontend is tasked with validating #that key must be >= 0. Hence", "#for values of key less than 0, JavaScript frontend is tasked with validating", "' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.encrypt('abc'), 'zyx') def test_string_wrap_around(self): self.assertMultiLineEqual( test.encrypt('wvu'), 'def') def test_multi_word(self):", "AtbashCipher test = AtbashCipher() #instantiate test caesar cipher class class AtbashCipherEncryptTests(unittest.TestCase): def test_empty_string(self):", "from atbash_cipher import AtbashCipher test = AtbashCipher() #instantiate test caesar cipher class class", "the test is skipped here class AtbashCipherDecryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.decrypt(''), '') def test_string_with_only_spaces(self):", "self.assertMultiLineEqual(test.decrypt('zyx'), 'abc') def test_string_wrap_around(self): self.assertMultiLineEqual( test.decrypt('def'), 'wvu') def test_multi_word(self): self.assertMultiLineEqual(test.decrypt('zyx def'), 'abc wvu')", "key must be >= 0. 
Hence the test is skipped here class AtbashCipherDecryptTests(unittest.TestCase):", "def test_empty_string(self): self.assertMultiLineEqual(test.decrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.decrypt(' '), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.decrypt('zyx'),", "AtbashCipher() #instantiate test caesar cipher class class AtbashCipherEncryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.encrypt(''), '') def", "self.assertMultiLineEqual(test.decrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.decrypt(' '), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.decrypt('zyx'), 'abc') def", "is tasked with validating #that key must be >= 0. Hence the test", "test_string_wrap_around(self): self.assertMultiLineEqual( test.encrypt('wvu'), 'def') def test_multi_word(self): self.assertMultiLineEqual(test.encrypt('abc wvu'), 'zyx def') #for values of", "#that key must be >= 0. Hence the test is skipped here class", ">= 0. Hence the test is skipped here class AtbashCipherDecryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.decrypt(''),", "'') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.decrypt(' '), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.decrypt('zyx'), 'abc') def test_string_wrap_around(self):", "0. Hence the test is skipped here class AtbashCipherDecryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.decrypt(''), '')", "test_string_with_only_spaces(self): self.assertMultiLineEqual(test.encrypt(' '), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.encrypt('abc'), 'zyx') def test_string_wrap_around(self): self.assertMultiLineEqual( test.encrypt('wvu'),", "import AtbashCipher test = AtbashCipher() #instantiate test caesar cipher class class AtbashCipherEncryptTests(unittest.TestCase): def", "def test_empty_string(self): self.assertMultiLineEqual(test.encrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.encrypt(' '), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.encrypt('abc'),", "'), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.decrypt('zyx'), 'abc') def test_string_wrap_around(self): self.assertMultiLineEqual( test.decrypt('def'), 'wvu') def", "frontend is tasked with validating #that key must be >= 0. 
Hence the", "test = AtbashCipher() #instantiate test caesar cipher class class AtbashCipherEncryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.encrypt(''),", "class AtbashCipherEncryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.encrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.encrypt(' '), ' ') def", "#instantiate test caesar cipher class class AtbashCipherEncryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.encrypt(''), '') def test_string_with_only_spaces(self):", "atbash_cipher import AtbashCipher test = AtbashCipher() #instantiate test caesar cipher class class AtbashCipherEncryptTests(unittest.TestCase):", "'), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.encrypt('abc'), 'zyx') def test_string_wrap_around(self): self.assertMultiLineEqual( test.encrypt('wvu'), 'def') def", "'def') def test_multi_word(self): self.assertMultiLineEqual(test.encrypt('abc wvu'), 'zyx def') #for values of key less than", "self.assertMultiLineEqual(test.decrypt(' '), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.decrypt('zyx'), 'abc') def test_string_wrap_around(self): self.assertMultiLineEqual( test.decrypt('def'), 'wvu')", "def test_string_wrap_around(self): self.assertMultiLineEqual( test.decrypt('def'), 'wvu') def test_multi_word(self): self.assertMultiLineEqual(test.decrypt('zyx def'), 'abc wvu') if __name__", "with validating #that key must be >= 0. Hence the test is skipped", "import unittest from atbash_cipher import AtbashCipher test = AtbashCipher() #instantiate test caesar cipher", "'wvu') def test_multi_word(self): self.assertMultiLineEqual(test.decrypt('zyx def'), 'abc wvu') if __name__ == '__main__': unittest.main() unittest.main()", "validating #that key must be >= 0. 
Hence the test is skipped here", "test_empty_string(self): self.assertMultiLineEqual(test.decrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.decrypt(' '), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.decrypt('zyx'), 'abc')", "self.assertMultiLineEqual(test.encrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.encrypt(' '), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.encrypt('abc'), 'zyx') def", "def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.encrypt('abc'), 'zyx') def test_string_wrap_around(self): self.assertMultiLineEqual( test.encrypt('wvu'), 'def') def test_multi_word(self): self.assertMultiLineEqual(test.encrypt('abc wvu'),", "is skipped here class AtbashCipherDecryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.decrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.decrypt(' '),", "def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.decrypt('zyx'), 'abc') def test_string_wrap_around(self): self.assertMultiLineEqual( test.decrypt('def'), 'wvu') def test_multi_word(self): self.assertMultiLineEqual(test.decrypt('zyx def'),", "AtbashCipherDecryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.decrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.decrypt(' '), ' ') def test_string_no_wrap_around(self):", "') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.decrypt('zyx'), 'abc') def test_string_wrap_around(self): self.assertMultiLineEqual( test.decrypt('def'), 'wvu') def test_multi_word(self): self.assertMultiLineEqual(test.decrypt('zyx", "self.assertMultiLineEqual(test.encrypt(' '), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.encrypt('abc'), 'zyx') def test_string_wrap_around(self): self.assertMultiLineEqual( test.encrypt('wvu'), 'def')", "def test_multi_word(self): self.assertMultiLineEqual(test.encrypt('abc wvu'), 'zyx def') #for values of key less than 0,", "'zyx def') #for values of key less than 0, JavaScript frontend is tasked", "test is skipped here class AtbashCipherDecryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.decrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.decrypt('", "'abc') def test_string_wrap_around(self): self.assertMultiLineEqual( test.decrypt('def'), 'wvu') def test_multi_word(self): self.assertMultiLineEqual(test.decrypt('zyx def'), 'abc wvu') if", "'zyx') def test_string_wrap_around(self): self.assertMultiLineEqual( test.encrypt('wvu'), 'def') def test_multi_word(self): self.assertMultiLineEqual(test.encrypt('abc wvu'), 'zyx def') #for", "self.assertMultiLineEqual(test.encrypt('abc wvu'), 'zyx def') #for values of key less than 0, JavaScript frontend", "caesar cipher class class AtbashCipherEncryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.encrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.encrypt(' '),", "be >= 0. 
Hence the test is skipped here class AtbashCipherDecryptTests(unittest.TestCase): def test_empty_string(self):", "class class AtbashCipherEncryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.encrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.encrypt(' '), ' ')", "skipped here class AtbashCipherDecryptTests(unittest.TestCase): def test_empty_string(self): self.assertMultiLineEqual(test.decrypt(''), '') def test_string_with_only_spaces(self): self.assertMultiLineEqual(test.decrypt(' '), '", "test.encrypt('wvu'), 'def') def test_multi_word(self): self.assertMultiLineEqual(test.encrypt('abc wvu'), 'zyx def') #for values of key less", "unittest from atbash_cipher import AtbashCipher test = AtbashCipher() #instantiate test caesar cipher class", "test_string_with_only_spaces(self): self.assertMultiLineEqual(test.decrypt(' '), ' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.decrypt('zyx'), 'abc') def test_string_wrap_around(self): self.assertMultiLineEqual( test.decrypt('def'),", "test_multi_word(self): self.assertMultiLineEqual(test.encrypt('abc wvu'), 'zyx def') #for values of key less than 0, JavaScript", "values of key less than 0, JavaScript frontend is tasked with validating #that", "' ') def test_string_no_wrap_around(self): self.assertMultiLineEqual(test.decrypt('zyx'), 'abc') def test_string_wrap_around(self): self.assertMultiLineEqual( test.decrypt('def'), 'wvu') def test_multi_word(self):", "key less than 0, JavaScript frontend is tasked with validating #that key must", "self.assertMultiLineEqual( test.decrypt('def'), 'wvu') def test_multi_word(self): self.assertMultiLineEqual(test.decrypt('zyx def'), 'abc wvu') if __name__ == '__main__':" ]
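The atbash_cipher module under test is not shown here; the following is only a minimal sketch of an AtbashCipher class that would satisfy these assertions, not the project's actual implementation:

import string


class AtbashCipher:
    # Atbash maps a<->z, b<->y, ... and is therefore its own inverse;
    # characters outside the lowercase alphabet (e.g. spaces) pass through.
    _table = str.maketrans(string.ascii_lowercase, string.ascii_lowercase[::-1])

    def encrypt(self, text):
        return text.translate(self._table)

    def decrypt(self, text):
        return text.translate(self._table)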
[ "25 is 1/HIGH/True - LED ON\") GPIO.output(24, 1) LedOn() #result=firebase.put('/data/','user_1',{'gasleakage':'1'}) else: print (\"Port", "firebase import RPi.GPIO as GPIO import plivo from time import sleep # this", "while True: if GPIO.input(25): print (\"Port 25 is 1/HIGH/True - LED ON\") GPIO.output(24,", "else: print (\"Port 25 is 0/LOW/False - LED OFF\") GPIO.output(24, 0) # set", "#firebase setup firebaseURL='< your firebase link>' fBase = firebase.FirebaseApplication(firebaseURL, None) LedOn=False; def LedOn():", "0/LOW/False - LED OFF\") GPIO.output(24, 0) # set port/pin value to 0/LOW/False sleep(0.1)", "try: while True: if GPIO.input(25): print (\"Port 25 is 1/HIGH/True - LED ON\")", "import firebase import RPi.GPIO as GPIO import plivo from time import sleep #", "number associated with your account dstPhoneNo=\"<mobile number>\" #phone number where you want to", "numbering GPIO.setup(25, GPIO.IN) # set GPIO25 as input (button) GPIO.setup(24, GPIO.OUT) #plivo setup", "as GPIO import plivo from time import sleep # this lets us have", "as input (button) GPIO.setup(24, GPIO.OUT) #plivo setup srcPhoneNo=\"<mobile number>\" #phone number associated with", "up BCM GPIO numbering GPIO.setup(25, GPIO.IN) # set GPIO25 as input (button) GPIO.setup(24,", "get on plivio app auth_token=\"<token>\" #auth_token which you will get on plivio app", "delay (see line 15) GPIO.setmode(GPIO.BCM) # set up BCM GPIO numbering GPIO.setup(25, GPIO.IN)", "LED ON\") GPIO.output(24, 1) LedOn() #result=firebase.put('/data/','user_1',{'gasleakage':'1'}) else: print (\"Port 25 is 0/LOW/False -", "and auth_token from plivo console auth_id=\"<id>\" #auth_id key which you will get on", "setup srcPhoneNo=\"<mobile number>\" #phone number associated with your account dstPhoneNo=\"<mobile number>\" #phone number", "import plivo from time import sleep # this lets us have a time", "auth_token from plivo console auth_id=\"<id>\" #auth_id key which you will get on plivio", "LedOn(): print(fBase.put('/data/user_1/',\"LedOn\",\"1\")) print(pSMS.send_message(msgObj)) LedOn=True def LedOff(): if(fBase.get('/data/user_1/','LedOn')==\"1\") : print(fBase.put('/data/user_1/',\"LedOffd\",\"0\")) LedOn=False try: while True:", "if GPIO.input(25): print (\"Port 25 is 1/HIGH/True - LED ON\") GPIO.output(24, 1) LedOn()", "15) GPIO.setmode(GPIO.BCM) # set up BCM GPIO numbering GPIO.setup(25, GPIO.IN) # set GPIO25", "to send sms smsText= u\"leds glow\" #message which you want to send msgObj={", "lets us have a time delay (see line 15) GPIO.setmode(GPIO.BCM) # set up", "(\"Port 25 is 0/LOW/False - LED OFF\") GPIO.output(24, 0) # set port/pin value", "firebase import firebase import RPi.GPIO as GPIO import plivo from time import sleep", "GPIO import plivo from time import sleep # this lets us have a", "srcPhoneNo, 'dst': dstPhoneNo, 'text': smsText} #get auth_id and auth_token from plivo console auth_id=\"<id>\"", "to send msgObj={ 'src': srcPhoneNo, 'dst': dstPhoneNo, 'text': smsText} #get auth_id and auth_token", "key which you will get on plivio app auth_token=\"<token>\" #auth_token which you will", "'text': smsText} #get auth_id and auth_token from plivo console auth_id=\"<id>\" #auth_id key which", "import RPi.GPIO as GPIO import plivo from time import sleep # this lets", "RPi.GPIO as GPIO import plivo from time import sleep # this lets us", "(see line 15) GPIO.setmode(GPIO.BCM) # set up BCM GPIO numbering GPIO.setup(25, GPIO.IN) #", "number>\" #phone number associated with your account dstPhoneNo=\"<mobile number>\" #phone 
number where you", "is 1/HIGH/True - LED ON\") GPIO.output(24, 1) LedOn() #result=firebase.put('/data/','user_1',{'gasleakage':'1'}) else: print (\"Port 25", "'src': srcPhoneNo, 'dst': dstPhoneNo, 'text': smsText} #get auth_id and auth_token from plivo console", "plivo.RestAPI(auth_id, auth_token) #firebase setup firebaseURL='< your firebase link>' fBase = firebase.FirebaseApplication(firebaseURL, None) LedOn=False;", "auth_id and auth_token from plivo console auth_id=\"<id>\" #auth_id key which you will get", "plivo from time import sleep # this lets us have a time delay", "input (button) GPIO.setup(24, GPIO.OUT) #plivo setup srcPhoneNo=\"<mobile number>\" #phone number associated with your", "= plivo.RestAPI(auth_id, auth_token) #firebase setup firebaseURL='< your firebase link>' fBase = firebase.FirebaseApplication(firebaseURL, None)", "LedOn=False try: while True: if GPIO.input(25): print (\"Port 25 is 1/HIGH/True - LED", "which you will get on plivio app auth_token=\"<token>\" #auth_token which you will get", "GPIO.output(24, 1) LedOn() #result=firebase.put('/data/','user_1',{'gasleakage':'1'}) else: print (\"Port 25 is 0/LOW/False - LED OFF\")", "= firebase.FirebaseApplication(firebaseURL, None) LedOn=False; def LedOn(): print(fBase.put('/data/user_1/',\"LedOn\",\"1\")) print(pSMS.send_message(msgObj)) LedOn=True def LedOff(): if(fBase.get('/data/user_1/','LedOn')==\"1\") :", "plivio app pSMS = plivo.RestAPI(auth_id, auth_token) #firebase setup firebaseURL='< your firebase link>' fBase", "GPIO.setmode(GPIO.BCM) # set up BCM GPIO numbering GPIO.setup(25, GPIO.IN) # set GPIO25 as", "GPIO25 as input (button) GPIO.setup(24, GPIO.OUT) #plivo setup srcPhoneNo=\"<mobile number>\" #phone number associated", "GPIO.input(25): print (\"Port 25 is 1/HIGH/True - LED ON\") GPIO.output(24, 1) LedOn() #result=firebase.put('/data/','user_1',{'gasleakage':'1'})", "us have a time delay (see line 15) GPIO.setmode(GPIO.BCM) # set up BCM", "fBase = firebase.FirebaseApplication(firebaseURL, None) LedOn=False; def LedOn(): print(fBase.put('/data/user_1/',\"LedOn\",\"1\")) print(pSMS.send_message(msgObj)) LedOn=True def LedOff(): if(fBase.get('/data/user_1/','LedOn')==\"1\")", "BCM GPIO numbering GPIO.setup(25, GPIO.IN) # set GPIO25 as input (button) GPIO.setup(24, GPIO.OUT)", "# set GPIO25 as input (button) GPIO.setup(24, GPIO.OUT) #plivo setup srcPhoneNo=\"<mobile number>\" #phone", "plivio app auth_token=\"<token>\" #auth_token which you will get on plivio app pSMS =", "auth_token) #firebase setup firebaseURL='< your firebase link>' fBase = firebase.FirebaseApplication(firebaseURL, None) LedOn=False; def", "firebase link>' fBase = firebase.FirebaseApplication(firebaseURL, None) LedOn=False; def LedOn(): print(fBase.put('/data/user_1/',\"LedOn\",\"1\")) print(pSMS.send_message(msgObj)) LedOn=True def", "you want to send sms smsText= u\"leds glow\" #message which you want to", "dstPhoneNo, 'text': smsText} #get auth_id and auth_token from plivo console auth_id=\"<id>\" #auth_id key", "25 is 0/LOW/False - LED OFF\") GPIO.output(24, 0) # set port/pin value to", "GPIO.setup(24, GPIO.OUT) #plivo setup srcPhoneNo=\"<mobile number>\" #phone number associated with your account dstPhoneNo=\"<mobile", "GPIO.IN) # set GPIO25 as input (button) GPIO.setup(24, GPIO.OUT) #plivo setup srcPhoneNo=\"<mobile number>\"", "which you will get on plivio app pSMS = plivo.RestAPI(auth_id, auth_token) #firebase setup", "True: if GPIO.input(25): print (\"Port 25 is 1/HIGH/True - LED ON\") GPIO.output(24, 1)", "get on plivio app pSMS 
= plivo.RestAPI(auth_id, auth_token) #firebase setup firebaseURL='< your firebase", "LedOn=False; def LedOn(): print(fBase.put('/data/user_1/',\"LedOn\",\"1\")) print(pSMS.send_message(msgObj)) LedOn=True def LedOff(): if(fBase.get('/data/user_1/','LedOn')==\"1\") : print(fBase.put('/data/user_1/',\"LedOffd\",\"0\")) LedOn=False try:", "set GPIO25 as input (button) GPIO.setup(24, GPIO.OUT) #plivo setup srcPhoneNo=\"<mobile number>\" #phone number", "send msgObj={ 'src': srcPhoneNo, 'dst': dstPhoneNo, 'text': smsText} #get auth_id and auth_token from", "on plivio app auth_token=\"<token>\" #auth_token which you will get on plivio app pSMS", "print (\"Port 25 is 0/LOW/False - LED OFF\") GPIO.output(24, 0) # set port/pin", "print (\"Port 25 is 1/HIGH/True - LED ON\") GPIO.output(24, 1) LedOn() #result=firebase.put('/data/','user_1',{'gasleakage':'1'}) else:", "LedOn() #result=firebase.put('/data/','user_1',{'gasleakage':'1'}) else: print (\"Port 25 is 0/LOW/False - LED OFF\") GPIO.output(24, 0)", "#message which you want to send msgObj={ 'src': srcPhoneNo, 'dst': dstPhoneNo, 'text': smsText}", "print(fBase.put('/data/user_1/',\"LedOn\",\"1\")) print(pSMS.send_message(msgObj)) LedOn=True def LedOff(): if(fBase.get('/data/user_1/','LedOn')==\"1\") : print(fBase.put('/data/user_1/',\"LedOffd\",\"0\")) LedOn=False try: while True: if", "u\"leds glow\" #message which you want to send msgObj={ 'src': srcPhoneNo, 'dst': dstPhoneNo,", "- LED ON\") GPIO.output(24, 1) LedOn() #result=firebase.put('/data/','user_1',{'gasleakage':'1'}) else: print (\"Port 25 is 0/LOW/False", "your firebase link>' fBase = firebase.FirebaseApplication(firebaseURL, None) LedOn=False; def LedOn(): print(fBase.put('/data/user_1/',\"LedOn\",\"1\")) print(pSMS.send_message(msgObj)) LedOn=True", "GPIO.setup(25, GPIO.IN) # set GPIO25 as input (button) GPIO.setup(24, GPIO.OUT) #plivo setup srcPhoneNo=\"<mobile", "app pSMS = plivo.RestAPI(auth_id, auth_token) #firebase setup firebaseURL='< your firebase link>' fBase =", "a time delay (see line 15) GPIO.setmode(GPIO.BCM) # set up BCM GPIO numbering", "time import sleep # this lets us have a time delay (see line", "#get auth_id and auth_token from plivo console auth_id=\"<id>\" #auth_id key which you will", "LedOn=True def LedOff(): if(fBase.get('/data/user_1/','LedOn')==\"1\") : print(fBase.put('/data/user_1/',\"LedOffd\",\"0\")) LedOn=False try: while True: if GPIO.input(25): print", "#phone number where you want to send sms smsText= u\"leds glow\" #message which", "you will get on plivio app auth_token=\"<token>\" #auth_token which you will get on", "#plivo setup srcPhoneNo=\"<mobile number>\" #phone number associated with your account dstPhoneNo=\"<mobile number>\" #phone", "from plivo console auth_id=\"<id>\" #auth_id key which you will get on plivio app", "setup firebaseURL='< your firebase link>' fBase = firebase.FirebaseApplication(firebaseURL, None) LedOn=False; def LedOn(): print(fBase.put('/data/user_1/',\"LedOn\",\"1\"))", "will get on plivio app auth_token=\"<token>\" #auth_token which you will get on plivio", "line 15) GPIO.setmode(GPIO.BCM) # set up BCM GPIO numbering GPIO.setup(25, GPIO.IN) # set", "auth_token=\"<token>\" #auth_token which you will get on plivio app pSMS = plivo.RestAPI(auth_id, auth_token)", "pSMS = plivo.RestAPI(auth_id, auth_token) #firebase setup firebaseURL='< your firebase link>' fBase = firebase.FirebaseApplication(firebaseURL,", "'dst': dstPhoneNo, 'text': smsText} #get auth_id and auth_token from plivo console auth_id=\"<id>\" #auth_id", "on 
plivio app pSMS = plivo.RestAPI(auth_id, auth_token) #firebase setup firebaseURL='< your firebase link>'", "LED OFF\") GPIO.output(24, 0) # set port/pin value to 0/LOW/False sleep(0.1) finally: GPIO.cleanup()", "have a time delay (see line 15) GPIO.setmode(GPIO.BCM) # set up BCM GPIO", "plivo console auth_id=\"<id>\" #auth_id key which you will get on plivio app auth_token=\"<token>\"", "GPIO numbering GPIO.setup(25, GPIO.IN) # set GPIO25 as input (button) GPIO.setup(24, GPIO.OUT) #plivo", "1/HIGH/True - LED ON\") GPIO.output(24, 1) LedOn() #result=firebase.put('/data/','user_1',{'gasleakage':'1'}) else: print (\"Port 25 is", "(button) GPIO.setup(24, GPIO.OUT) #plivo setup srcPhoneNo=\"<mobile number>\" #phone number associated with your account", "sms smsText= u\"leds glow\" #message which you want to send msgObj={ 'src': srcPhoneNo,", "(\"Port 25 is 1/HIGH/True - LED ON\") GPIO.output(24, 1) LedOn() #result=firebase.put('/data/','user_1',{'gasleakage':'1'}) else: print", "None) LedOn=False; def LedOn(): print(fBase.put('/data/user_1/',\"LedOn\",\"1\")) print(pSMS.send_message(msgObj)) LedOn=True def LedOff(): if(fBase.get('/data/user_1/','LedOn')==\"1\") : print(fBase.put('/data/user_1/',\"LedOffd\",\"0\")) LedOn=False", "where you want to send sms smsText= u\"leds glow\" #message which you want", "1) LedOn() #result=firebase.put('/data/','user_1',{'gasleakage':'1'}) else: print (\"Port 25 is 0/LOW/False - LED OFF\") GPIO.output(24,", "from firebase import firebase import RPi.GPIO as GPIO import plivo from time import", "import sleep # this lets us have a time delay (see line 15)", "auth_id=\"<id>\" #auth_id key which you will get on plivio app auth_token=\"<token>\" #auth_token which", "print(fBase.put('/data/user_1/',\"LedOffd\",\"0\")) LedOn=False try: while True: if GPIO.input(25): print (\"Port 25 is 1/HIGH/True -", "- LED OFF\") GPIO.output(24, 0) # set port/pin value to 0/LOW/False sleep(0.1) finally:", "which you want to send msgObj={ 'src': srcPhoneNo, 'dst': dstPhoneNo, 'text': smsText} #get", "#phone number associated with your account dstPhoneNo=\"<mobile number>\" #phone number where you want", "GPIO.OUT) #plivo setup srcPhoneNo=\"<mobile number>\" #phone number associated with your account dstPhoneNo=\"<mobile number>\"", "associated with your account dstPhoneNo=\"<mobile number>\" #phone number where you want to send", "want to send sms smsText= u\"leds glow\" #message which you want to send", "smsText} #get auth_id and auth_token from plivo console auth_id=\"<id>\" #auth_id key which you", "#auth_id key which you will get on plivio app auth_token=\"<token>\" #auth_token which you", "#result=firebase.put('/data/','user_1',{'gasleakage':'1'}) else: print (\"Port 25 is 0/LOW/False - LED OFF\") GPIO.output(24, 0) #", "with your account dstPhoneNo=\"<mobile number>\" #phone number where you want to send sms", "send sms smsText= u\"leds glow\" #message which you want to send msgObj={ 'src':", "time delay (see line 15) GPIO.setmode(GPIO.BCM) # set up BCM GPIO numbering GPIO.setup(25,", ": print(fBase.put('/data/user_1/',\"LedOffd\",\"0\")) LedOn=False try: while True: if GPIO.input(25): print (\"Port 25 is 1/HIGH/True", "number>\" #phone number where you want to send sms smsText= u\"leds glow\" #message", "your account dstPhoneNo=\"<mobile number>\" #phone number where you want to send sms smsText=", "print(pSMS.send_message(msgObj)) LedOn=True def LedOff(): if(fBase.get('/data/user_1/','LedOn')==\"1\") : print(fBase.put('/data/user_1/',\"LedOffd\",\"0\")) LedOn=False try: while 
True: if GPIO.input(25):", "firebaseURL='< your firebase link>' fBase = firebase.FirebaseApplication(firebaseURL, None) LedOn=False; def LedOn(): print(fBase.put('/data/user_1/',\"LedOn\",\"1\")) print(pSMS.send_message(msgObj))", "account dstPhoneNo=\"<mobile number>\" #phone number where you want to send sms smsText= u\"leds", "#auth_token which you will get on plivio app pSMS = plivo.RestAPI(auth_id, auth_token) #firebase", "# set up BCM GPIO numbering GPIO.setup(25, GPIO.IN) # set GPIO25 as input", "will get on plivio app pSMS = plivo.RestAPI(auth_id, auth_token) #firebase setup firebaseURL='< your", "from time import sleep # this lets us have a time delay (see", "smsText= u\"leds glow\" #message which you want to send msgObj={ 'src': srcPhoneNo, 'dst':", "firebase.FirebaseApplication(firebaseURL, None) LedOn=False; def LedOn(): print(fBase.put('/data/user_1/',\"LedOn\",\"1\")) print(pSMS.send_message(msgObj)) LedOn=True def LedOff(): if(fBase.get('/data/user_1/','LedOn')==\"1\") : print(fBase.put('/data/user_1/',\"LedOffd\",\"0\"))", "def LedOff(): if(fBase.get('/data/user_1/','LedOn')==\"1\") : print(fBase.put('/data/user_1/',\"LedOffd\",\"0\")) LedOn=False try: while True: if GPIO.input(25): print (\"Port", "glow\" #message which you want to send msgObj={ 'src': srcPhoneNo, 'dst': dstPhoneNo, 'text':", "you will get on plivio app pSMS = plivo.RestAPI(auth_id, auth_token) #firebase setup firebaseURL='<", "def LedOn(): print(fBase.put('/data/user_1/',\"LedOn\",\"1\")) print(pSMS.send_message(msgObj)) LedOn=True def LedOff(): if(fBase.get('/data/user_1/','LedOn')==\"1\") : print(fBase.put('/data/user_1/',\"LedOffd\",\"0\")) LedOn=False try: while", "app auth_token=\"<token>\" #auth_token which you will get on plivio app pSMS = plivo.RestAPI(auth_id,", "link>' fBase = firebase.FirebaseApplication(firebaseURL, None) LedOn=False; def LedOn(): print(fBase.put('/data/user_1/',\"LedOn\",\"1\")) print(pSMS.send_message(msgObj)) LedOn=True def LedOff():", "if(fBase.get('/data/user_1/','LedOn')==\"1\") : print(fBase.put('/data/user_1/',\"LedOffd\",\"0\")) LedOn=False try: while True: if GPIO.input(25): print (\"Port 25 is", "sleep # this lets us have a time delay (see line 15) GPIO.setmode(GPIO.BCM)", "this lets us have a time delay (see line 15) GPIO.setmode(GPIO.BCM) # set", "srcPhoneNo=\"<mobile number>\" #phone number associated with your account dstPhoneNo=\"<mobile number>\" #phone number where", "set up BCM GPIO numbering GPIO.setup(25, GPIO.IN) # set GPIO25 as input (button)", "number where you want to send sms smsText= u\"leds glow\" #message which you", "msgObj={ 'src': srcPhoneNo, 'dst': dstPhoneNo, 'text': smsText} #get auth_id and auth_token from plivo", "<filename>Pi-SMS.py from firebase import firebase import RPi.GPIO as GPIO import plivo from time", "# this lets us have a time delay (see line 15) GPIO.setmode(GPIO.BCM) #", "console auth_id=\"<id>\" #auth_id key which you will get on plivio app auth_token=\"<token>\" #auth_token", "LedOff(): if(fBase.get('/data/user_1/','LedOn')==\"1\") : print(fBase.put('/data/user_1/',\"LedOffd\",\"0\")) LedOn=False try: while True: if GPIO.input(25): print (\"Port 25", "dstPhoneNo=\"<mobile number>\" #phone number where you want to send sms smsText= u\"leds glow\"", "is 0/LOW/False - LED OFF\") GPIO.output(24, 0) # set port/pin value to 0/LOW/False", "you want to send msgObj={ 'src': srcPhoneNo, 'dst': dstPhoneNo, 'text': smsText} #get auth_id", "want to send msgObj={ 'src': srcPhoneNo, 'dst': dstPhoneNo, 'text': smsText} #get auth_id and", 
"ON\") GPIO.output(24, 1) LedOn() #result=firebase.put('/data/','user_1',{'gasleakage':'1'}) else: print (\"Port 25 is 0/LOW/False - LED" ]
[ "\"\"\" class VEnvTypeProtocol(Protocol): _manager = None def validate(self, *args, **kwargs): \"\"\" Make sure", "typing import Protocol class VEnvProtocol(Protocol): path = None def activate(self): \"\"\" Activate virtual", "VEnvProtocol(Protocol): path = None def activate(self): \"\"\" Activate virtual environment \"\"\" class VEnvTypeProtocol(Protocol):", "is exist \"\"\" def get_activate_string(self): pass class ShellExecutorProtocol: def add_to_exec_chain(self, activate_str): pass def", "from typing import Protocol class VEnvProtocol(Protocol): path = None def activate(self): \"\"\" Activate", "None def validate(self, *args, **kwargs): \"\"\" Make sure that venv is exist \"\"\"", "validate(self, *args, **kwargs): \"\"\" Make sure that venv is exist \"\"\" def get_activate_string(self):", "VEnvTypeProtocol(Protocol): _manager = None def validate(self, *args, **kwargs): \"\"\" Make sure that venv", "class VEnvTypeProtocol(Protocol): _manager = None def validate(self, *args, **kwargs): \"\"\" Make sure that", "\"\"\" Make sure that venv is exist \"\"\" def get_activate_string(self): pass class ShellExecutorProtocol:", "Make sure that venv is exist \"\"\" def get_activate_string(self): pass class ShellExecutorProtocol: def", "Protocol class VEnvProtocol(Protocol): path = None def activate(self): \"\"\" Activate virtual environment \"\"\"", "exist \"\"\" def get_activate_string(self): pass class ShellExecutorProtocol: def add_to_exec_chain(self, activate_str): pass def exec(self):", "\"\"\" Activate virtual environment \"\"\" class VEnvTypeProtocol(Protocol): _manager = None def validate(self, *args,", "Activate virtual environment \"\"\" class VEnvTypeProtocol(Protocol): _manager = None def validate(self, *args, **kwargs):", "environment \"\"\" class VEnvTypeProtocol(Protocol): _manager = None def validate(self, *args, **kwargs): \"\"\" Make", "activate(self): \"\"\" Activate virtual environment \"\"\" class VEnvTypeProtocol(Protocol): _manager = None def validate(self,", "= None def activate(self): \"\"\" Activate virtual environment \"\"\" class VEnvTypeProtocol(Protocol): _manager =", "*args, **kwargs): \"\"\" Make sure that venv is exist \"\"\" def get_activate_string(self): pass", "_manager = None def validate(self, *args, **kwargs): \"\"\" Make sure that venv is", "path = None def activate(self): \"\"\" Activate virtual environment \"\"\" class VEnvTypeProtocol(Protocol): _manager", "= None def validate(self, *args, **kwargs): \"\"\" Make sure that venv is exist", "venv is exist \"\"\" def get_activate_string(self): pass class ShellExecutorProtocol: def add_to_exec_chain(self, activate_str): pass", "\"\"\" def get_activate_string(self): pass class ShellExecutorProtocol: def add_to_exec_chain(self, activate_str): pass def exec(self): pass", "that venv is exist \"\"\" def get_activate_string(self): pass class ShellExecutorProtocol: def add_to_exec_chain(self, activate_str):", "None def activate(self): \"\"\" Activate virtual environment \"\"\" class VEnvTypeProtocol(Protocol): _manager = None", "sure that venv is exist \"\"\" def get_activate_string(self): pass class ShellExecutorProtocol: def add_to_exec_chain(self,", "class VEnvProtocol(Protocol): path = None def activate(self): \"\"\" Activate virtual environment \"\"\" class", "def validate(self, *args, **kwargs): \"\"\" Make sure that venv is exist \"\"\" def", "virtual environment \"\"\" class VEnvTypeProtocol(Protocol): _manager = None def validate(self, *args, **kwargs): \"\"\"", "import Protocol class 
VEnvProtocol(Protocol): path = None def activate(self): \"\"\" Activate virtual environment", "def activate(self): \"\"\" Activate virtual environment \"\"\" class VEnvTypeProtocol(Protocol): _manager = None def", "**kwargs): \"\"\" Make sure that venv is exist \"\"\" def get_activate_string(self): pass class" ]
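A minimal sketch of what these Protocols buy (LocalVenv is a hypothetical class, and a trimmed VEnvProtocol is restated so the sketch is self-contained): any object with the right attributes and methods satisfies the Protocol structurally, without inheriting from it.

from typing import Protocol


class VEnvProtocol(Protocol):
    path: str

    def activate(self) -> None: ...


class LocalVenv:  # note: does not subclass VEnvProtocol
    def __init__(self, path: str) -> None:
        self.path = path

    def activate(self) -> None:
        print(f"activating {self.path}")


def enter(venv: VEnvProtocol) -> None:
    venv.activate()


enter(LocalVenv(".venv"))  # accepted: the structure matches the Protocol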
[ "[\"v\"] assert form.a() == \"\"\"<input id=\"a\" name=\"a\" type=\"number\" value=\"v\">\"\"\" assert form.b.data == -15", "form.b._value() == \"9\" form = F(DummyPostData(), data=dict(b=\"v\")) assert form.b.data is None assert form.a._value()", "assert form.b() == \"\"\"<input id=\"b\" name=\"b\" type=\"number\" value=\"-15\">\"\"\" assert not form.a.validate(form) assert form.b.validate(form)", "from wtforms.form import Form class F(Form): a = IntegerField() b = IntegerField(default=48) def", "form.a.raw_data == [] assert form.b.data is None assert form.b.raw_data == [\"\"] assert not", "== \"\"\"<input id=\"b\" name=\"b\" type=\"number\" value=\"-15\">\"\"\" assert not form.a.validate(form) assert form.b.validate(form) form =", "len(form.b.process_errors) == 1 assert len(form.b.errors) == 1 form = F(b=9) assert form.b.data ==", "form.a._value() == \"\" assert form.b._value() == \"\" assert not form.validate() assert len(form.b.process_errors) ==", "assert form.a.raw_data == [] assert form.b.data is None assert form.b.raw_data == [\"\"] assert", "wtforms.fields import IntegerField from wtforms.form import Form class F(Form): a = IntegerField() b", "= F(DummyPostData(), data=dict(b=\"v\")) assert form.b.data is None assert form.a._value() == \"\" assert form.b._value()", "assert form.b.data == 9 assert form.a._value() == \"\" assert form.b._value() == \"9\" form", "form.a.validate(form) assert form.b.validate(form) form = F(DummyPostData(a=[], b=[\"\"])) assert form.a.data is None assert form.a.raw_data", "assert form.b.data is None assert form.b.raw_data == [\"\"] assert not form.validate() assert len(form.b.process_errors)", "form = F(DummyPostData(), data=dict(b=\"v\")) assert form.b.data is None assert form.a._value() == \"\" assert", "import Form class F(Form): a = IntegerField() b = IntegerField(default=48) def test_integer_field(): form", "assert len(form.b.errors) == 1 form = F(b=9) assert form.b.data == 9 assert form.a._value()", "tests.common import DummyPostData from wtforms.fields import IntegerField from wtforms.form import Form class F(Form):", "import IntegerField from wtforms.form import Form class F(Form): a = IntegerField() b =", "== \"\" assert form.b._value() == \"9\" form = F(DummyPostData(), data=dict(b=\"v\")) assert form.b.data is", "assert len(form.b.process_errors) == 1 assert len(form.b.errors) == 1 form = F(b=9) assert form.b.data", "IntegerField from wtforms.form import Form class F(Form): a = IntegerField() b = IntegerField(default=48)", "F(DummyPostData(a=[\"v\"], b=[\"-15\"])) assert form.a.data is None assert form.a.raw_data == [\"v\"] assert form.a() ==", "[\"\"] assert not form.validate() assert len(form.b.process_errors) == 1 assert len(form.b.errors) == 1 form", "assert form.a.data is None assert form.a.raw_data == [\"v\"] assert form.a() == \"\"\"<input id=\"a\"", "== \"\" assert not form.validate() assert len(form.b.process_errors) == 1 assert len(form.b.errors) == 1", "id=\"b\" name=\"b\" type=\"number\" value=\"-15\">\"\"\" assert not form.a.validate(form) assert form.b.validate(form) form = F(DummyPostData(a=[], b=[\"\"]))", "assert form.a._value() == \"\" assert form.b._value() == \"\" assert not form.validate() assert len(form.b.process_errors)", "F(DummyPostData(a=[], b=[\"\"])) assert form.a.data is None assert form.a.raw_data == [] assert form.b.data is", "None assert form.b.raw_data == [\"\"] assert not form.validate() assert len(form.b.process_errors) == 1 assert", "None assert form.a.raw_data == [] assert form.b.data is None assert 
form.b.raw_data == [\"\"]", "b = IntegerField(default=48) def test_integer_field(): form = F(DummyPostData(a=[\"v\"], b=[\"-15\"])) assert form.a.data is None", "assert form.b._value() == \"9\" form = F(DummyPostData(), data=dict(b=\"v\")) assert form.b.data is None assert", "form.a.data is None assert form.a.raw_data == [\"v\"] assert form.a() == \"\"\"<input id=\"a\" name=\"a\"", "\"\" assert form.b._value() == \"\" assert not form.validate() assert len(form.b.process_errors) == 1 assert", "def test_integer_field(): form = F(DummyPostData(a=[\"v\"], b=[\"-15\"])) assert form.a.data is None assert form.a.raw_data ==", "test_integer_field(): form = F(DummyPostData(a=[\"v\"], b=[\"-15\"])) assert form.a.data is None assert form.a.raw_data == [\"v\"]", "value=\"-15\">\"\"\" assert not form.a.validate(form) assert form.b.validate(form) form = F(DummyPostData(a=[], b=[\"\"])) assert form.a.data is", "== [\"v\"] assert form.a() == \"\"\"<input id=\"a\" name=\"a\" type=\"number\" value=\"v\">\"\"\" assert form.b.data ==", "from wtforms.fields import IntegerField from wtforms.form import Form class F(Form): a = IntegerField()", "== \"9\" form = F(DummyPostData(), data=dict(b=\"v\")) assert form.b.data is None assert form.a._value() ==", "IntegerField(default=48) def test_integer_field(): form = F(DummyPostData(a=[\"v\"], b=[\"-15\"])) assert form.a.data is None assert form.a.raw_data", "form.b._value() == \"\" assert not form.validate() assert len(form.b.process_errors) == 1 assert len(form.b.errors) ==", "[] assert form.b.data is None assert form.b.raw_data == [\"\"] assert not form.validate() assert", "1 form = F(b=9) assert form.b.data == 9 assert form.a._value() == \"\" assert", "IntegerField() b = IntegerField(default=48) def test_integer_field(): form = F(DummyPostData(a=[\"v\"], b=[\"-15\"])) assert form.a.data is", "== \"\"\"<input id=\"a\" name=\"a\" type=\"number\" value=\"v\">\"\"\" assert form.b.data == -15 assert form.b() ==", "data=dict(b=\"v\")) assert form.b.data is None assert form.a._value() == \"\" assert form.b._value() == \"\"", "is None assert form.a._value() == \"\" assert form.b._value() == \"\" assert not form.validate()", "== -15 assert form.b() == \"\"\"<input id=\"b\" name=\"b\" type=\"number\" value=\"-15\">\"\"\" assert not form.a.validate(form)", "class F(Form): a = IntegerField() b = IntegerField(default=48) def test_integer_field(): form = F(DummyPostData(a=[\"v\"],", "b=[\"-15\"])) assert form.a.data is None assert form.a.raw_data == [\"v\"] assert form.a() == \"\"\"<input", "9 assert form.a._value() == \"\" assert form.b._value() == \"9\" form = F(DummyPostData(), data=dict(b=\"v\"))", "== [] assert form.b.data is None assert form.b.raw_data == [\"\"] assert not form.validate()", "\"\"\"<input id=\"a\" name=\"a\" type=\"number\" value=\"v\">\"\"\" assert form.b.data == -15 assert form.b() == \"\"\"<input", "== 9 assert form.a._value() == \"\" assert form.b._value() == \"9\" form = F(DummyPostData(),", "\"9\" form = F(DummyPostData(), data=dict(b=\"v\")) assert form.b.data is None assert form.a._value() == \"\"", "assert form.a._value() == \"\" assert form.b._value() == \"9\" form = F(DummyPostData(), data=dict(b=\"v\")) assert", "Form class F(Form): a = IntegerField() b = IntegerField(default=48) def test_integer_field(): form =", "form.b.data is None assert form.b.raw_data == [\"\"] assert not form.validate() assert len(form.b.process_errors) ==", "form.b.data == 9 assert form.a._value() == \"\" assert form.b._value() == \"9\" form =", "b=[\"\"])) assert 
form.a.data is None assert form.a.raw_data == [] assert form.b.data is None", "\"\"\"<input id=\"b\" name=\"b\" type=\"number\" value=\"-15\">\"\"\" assert not form.a.validate(form) assert form.b.validate(form) form = F(DummyPostData(a=[],", "assert form.b.data is None assert form.a._value() == \"\" assert form.b._value() == \"\" assert", "1 assert len(form.b.errors) == 1 form = F(b=9) assert form.b.data == 9 assert", "not form.a.validate(form) assert form.b.validate(form) form = F(DummyPostData(a=[], b=[\"\"])) assert form.a.data is None assert", "assert form.b.raw_data == [\"\"] assert not form.validate() assert len(form.b.process_errors) == 1 assert len(form.b.errors)", "is None assert form.b.raw_data == [\"\"] assert not form.validate() assert len(form.b.process_errors) == 1", "= F(b=9) assert form.b.data == 9 assert form.a._value() == \"\" assert form.b._value() ==", "id=\"a\" name=\"a\" type=\"number\" value=\"v\">\"\"\" assert form.b.data == -15 assert form.b() == \"\"\"<input id=\"b\"", "name=\"b\" type=\"number\" value=\"-15\">\"\"\" assert not form.a.validate(form) assert form.b.validate(form) form = F(DummyPostData(a=[], b=[\"\"])) assert", "== 1 assert len(form.b.errors) == 1 form = F(b=9) assert form.b.data == 9", "assert form.b.validate(form) form = F(DummyPostData(a=[], b=[\"\"])) assert form.a.data is None assert form.a.raw_data ==", "form.b.data is None assert form.a._value() == \"\" assert form.b._value() == \"\" assert not", "F(b=9) assert form.b.data == 9 assert form.a._value() == \"\" assert form.b._value() == \"9\"", "assert not form.a.validate(form) assert form.b.validate(form) form = F(DummyPostData(a=[], b=[\"\"])) assert form.a.data is None", "type=\"number\" value=\"-15\">\"\"\" assert not form.a.validate(form) assert form.b.validate(form) form = F(DummyPostData(a=[], b=[\"\"])) assert form.a.data", "form = F(b=9) assert form.b.data == 9 assert form.a._value() == \"\" assert form.b._value()", "import DummyPostData from wtforms.fields import IntegerField from wtforms.form import Form class F(Form): a", "is None assert form.a.raw_data == [] assert form.b.data is None assert form.b.raw_data ==", "value=\"v\">\"\"\" assert form.b.data == -15 assert form.b() == \"\"\"<input id=\"b\" name=\"b\" type=\"number\" value=\"-15\">\"\"\"", "form.b() == \"\"\"<input id=\"b\" name=\"b\" type=\"number\" value=\"-15\">\"\"\" assert not form.a.validate(form) assert form.b.validate(form) form", "wtforms.form import Form class F(Form): a = IntegerField() b = IntegerField(default=48) def test_integer_field():", "form.b.validate(form) form = F(DummyPostData(a=[], b=[\"\"])) assert form.a.data is None assert form.a.raw_data == []", "not form.validate() assert len(form.b.process_errors) == 1 assert len(form.b.errors) == 1 form = F(b=9)", "assert form.a.raw_data == [\"v\"] assert form.a() == \"\"\"<input id=\"a\" name=\"a\" type=\"number\" value=\"v\">\"\"\" assert", "form = F(DummyPostData(a=[], b=[\"\"])) assert form.a.data is None assert form.a.raw_data == [] assert", "form = F(DummyPostData(a=[\"v\"], b=[\"-15\"])) assert form.a.data is None assert form.a.raw_data == [\"v\"] assert", "\"\" assert form.b._value() == \"9\" form = F(DummyPostData(), data=dict(b=\"v\")) assert form.b.data is None", "form.a.data is None assert form.a.raw_data == [] assert form.b.data is None assert form.b.raw_data", "== [\"\"] assert not form.validate() assert len(form.b.process_errors) == 1 assert len(form.b.errors) == 1", "a = IntegerField() b = IntegerField(default=48) def test_integer_field(): form = 
F(DummyPostData(a=[\"v\"], b=[\"-15\"])) assert", "type=\"number\" value=\"v\">\"\"\" assert form.b.data == -15 assert form.b() == \"\"\"<input id=\"b\" name=\"b\" type=\"number\"", "assert form.a() == \"\"\"<input id=\"a\" name=\"a\" type=\"number\" value=\"v\">\"\"\" assert form.b.data == -15 assert", "= F(DummyPostData(a=[\"v\"], b=[\"-15\"])) assert form.a.data is None assert form.a.raw_data == [\"v\"] assert form.a()", "name=\"a\" type=\"number\" value=\"v\">\"\"\" assert form.b.data == -15 assert form.b() == \"\"\"<input id=\"b\" name=\"b\"", "= IntegerField(default=48) def test_integer_field(): form = F(DummyPostData(a=[\"v\"], b=[\"-15\"])) assert form.a.data is None assert", "F(DummyPostData(), data=dict(b=\"v\")) assert form.b.data is None assert form.a._value() == \"\" assert form.b._value() ==", "== 1 form = F(b=9) assert form.b.data == 9 assert form.a._value() == \"\"", "None assert form.a.raw_data == [\"v\"] assert form.a() == \"\"\"<input id=\"a\" name=\"a\" type=\"number\" value=\"v\">\"\"\"", "None assert form.a._value() == \"\" assert form.b._value() == \"\" assert not form.validate() assert", "-15 assert form.b() == \"\"\"<input id=\"b\" name=\"b\" type=\"number\" value=\"-15\">\"\"\" assert not form.a.validate(form) assert", "form.a._value() == \"\" assert form.b._value() == \"9\" form = F(DummyPostData(), data=dict(b=\"v\")) assert form.b.data", "F(Form): a = IntegerField() b = IntegerField(default=48) def test_integer_field(): form = F(DummyPostData(a=[\"v\"], b=[\"-15\"]))", "assert form.b.data == -15 assert form.b() == \"\"\"<input id=\"b\" name=\"b\" type=\"number\" value=\"-15\">\"\"\" assert", "form.validate() assert len(form.b.process_errors) == 1 assert len(form.b.errors) == 1 form = F(b=9) assert", "== \"\" assert form.b._value() == \"\" assert not form.validate() assert len(form.b.process_errors) == 1", "= F(DummyPostData(a=[], b=[\"\"])) assert form.a.data is None assert form.a.raw_data == [] assert form.b.data", "is None assert form.a.raw_data == [\"v\"] assert form.a() == \"\"\"<input id=\"a\" name=\"a\" type=\"number\"", "form.b.raw_data == [\"\"] assert not form.validate() assert len(form.b.process_errors) == 1 assert len(form.b.errors) ==", "assert form.a.data is None assert form.a.raw_data == [] assert form.b.data is None assert", "form.a() == \"\"\"<input id=\"a\" name=\"a\" type=\"number\" value=\"v\">\"\"\" assert form.b.data == -15 assert form.b()", "from tests.common import DummyPostData from wtforms.fields import IntegerField from wtforms.form import Form class", "form.a.raw_data == [\"v\"] assert form.a() == \"\"\"<input id=\"a\" name=\"a\" type=\"number\" value=\"v\">\"\"\" assert form.b.data", "assert form.b._value() == \"\" assert not form.validate() assert len(form.b.process_errors) == 1 assert len(form.b.errors)", "form.b.data == -15 assert form.b() == \"\"\"<input id=\"b\" name=\"b\" type=\"number\" value=\"-15\">\"\"\" assert not", "assert not form.validate() assert len(form.b.process_errors) == 1 assert len(form.b.errors) == 1 form =", "len(form.b.errors) == 1 form = F(b=9) assert form.b.data == 9 assert form.a._value() ==", "DummyPostData from wtforms.fields import IntegerField from wtforms.form import Form class F(Form): a =", "= IntegerField() b = IntegerField(default=48) def test_integer_field(): form = F(DummyPostData(a=[\"v\"], b=[\"-15\"])) assert form.a.data" ]
[ "raise ErrorUninstallingException def uninstall(package): cmd = '\"{}\" uninstall \"{}\"'.format(config.ADB_PATH, package) try: request_pipe(cmd) except", "except Exception as e: if 'not enough space' in str(e): raise NotEnoughSpaceException() raise", "file.read() return data def get_api_level(): cmd = \"{} shell getprop ro.build.version.sdk\".format(config.ADB_PATH) api_level =", "% (out, err)) return res.decode('utf-8') def start_activity_explicitly(package_name, activity_name): # adb shell am start", "= os.path.join(logs_dir, \"{}.txt\".format(app)) dump_log(file_path) return file_path def read_log(path): with open(path, 'r') as file:", "pipe.returncode > 0 : raise Exception(\"----------------------------------------------------\\n\\ Out: %s\\nError: %s\" % (out, err)) return", "package_name + '/' + activity_name cmd = \"{0} shell am start -n {1}\".format(config.ADB_PATH,", "package [%s]...\" % (activity_name, package_name)) run_string = package_name + '/' + activity_name cmd", "\"{0} logcat -c\".format(config.ADB_PATH) request_pipe(cmd) def dump_log(path): cmd = \"{0} logcat -d *:E >", "'adb shell monkey -p {} -s {} --throttle {} {}' request_pipe(cmd.format(package, seed, throttle,", "file_path = os.path.join(logs_dir, \"{}.txt\".format(app)) dump_log(file_path) return file_path def read_log(path): with open(path, 'r') as", "def read_log(path): with open(path, 'r') as file: data = file.read() return data def", "err if pipe.returncode > 0 : raise Exception(\"----------------------------------------------------\\n\\ Out: %s\\nError: %s\" % (out,", "ErrorInstallingException, ErrorUninstallingException, NotEnoughSpaceException import os def install(new_apk_path): cmd = '\"{}\" install -r \"{}\"'.format(config.ADB_PATH,", "app): file_path = os.path.join(logs_dir, \"{}.txt\".format(app)) dump_log(file_path) return file_path def read_log(path): with open(path, 'r')", "err = pipe.communicate() res = out if not out: res = err if", "cmd = \"{} shell getprop ro.build.version.sdk\".format(config.ADB_PATH) api_level = int(request_pipe(cmd)) return api_level def run_monkey(package,", "os def install(new_apk_path): cmd = '\"{}\" install -r \"{}\"'.format(config.ADB_PATH, new_apk_path) try: out =", "cmd = \"{0} logcat -c\".format(config.ADB_PATH) request_pipe(cmd) def dump_log(path): cmd = \"{0} logcat -d", "try: out = request_pipe(cmd) except Exception as e: if 'not enough space' in", "event_num): cmd = 'adb shell monkey -p {} -s {} --throttle {} {}'", "[%s] of the package [%s]...\" % (activity_name, package_name)) run_string = package_name + '/'", "\"{}\"'.format(config.ADB_PATH, new_apk_path) try: out = request_pipe(cmd) except Exception as e: if 'not enough", "as e: if 'not enough space' in str(e): raise NotEnoughSpaceException() raise ErrorInstallingException if", "-c\".format(config.ADB_PATH) request_pipe(cmd) def dump_log(path): cmd = \"{0} logcat -d *:E > {1}\".format(config.ADB_PATH, path)", "= '\"{}\" uninstall \"{}\"'.format(config.ADB_PATH, package) try: request_pipe(cmd) except Exception: raise ErrorUninstallingException def request_pipe(cmd):", "raise NotEnoughSpaceException() raise ErrorInstallingException if 'Exception occurred while dumping' in out: raise ErrorUninstallingException", "of the package [%s]...\" % (activity_name, package_name)) run_string = package_name + '/' +", "request_pipe(cmd) def clean_log(): cmd = \"{0} logcat -c\".format(config.ADB_PATH) request_pipe(cmd) def dump_log(path): cmd =", "def uninstall(package): cmd = '\"{}\" uninstall 
\"{}\"'.format(config.ADB_PATH, package) try: request_pipe(cmd) except Exception: raise", "= \"{0} logcat -c\".format(config.ADB_PATH) request_pipe(cmd) def dump_log(path): cmd = \"{0} logcat -d *:E", "subprocess from modules import config from modules.exceptions import AbsentPackageException, ErrorInstallingException, ErrorUninstallingException, NotEnoughSpaceException import", "if 'not enough space' in str(e): raise NotEnoughSpaceException() raise ErrorInstallingException if 'Exception occurred", "request_pipe(cmd) except Exception as e: if 'not enough space' in str(e): raise NotEnoughSpaceException()", "throttle, event_num): cmd = 'adb shell monkey -p {} -s {} --throttle {}", "run_string) request_pipe(cmd) def clean_log(): cmd = \"{0} logcat -c\".format(config.ADB_PATH) request_pipe(cmd) def dump_log(path): cmd", "package) try: request_pipe(cmd) except Exception: raise ErrorUninstallingException def request_pipe(cmd): pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,", "modules import config from modules.exceptions import AbsentPackageException, ErrorInstallingException, ErrorUninstallingException, NotEnoughSpaceException import os def", "def dump_log(path): cmd = \"{0} logcat -d *:E > {1}\".format(config.ADB_PATH, path) request_pipe(cmd) def", "= 'adb shell monkey -p {} -s {} --throttle {} {}' request_pipe(cmd.format(package, seed,", "-n {1}\".format(config.ADB_PATH, run_string) request_pipe(cmd) def clean_log(): cmd = \"{0} logcat -c\".format(config.ADB_PATH) request_pipe(cmd) def", "shell=True) out, err = pipe.communicate() res = out if not out: res =", "in out: raise ErrorUninstallingException def uninstall(package): cmd = '\"{}\" uninstall \"{}\"'.format(config.ADB_PATH, package) try:", "out if not out: res = err if pipe.returncode > 0 : raise", "activity_name): # adb shell am start -n com.package.name/com.package.name.ActivityName logger.debug(\"Starting activity [%s] of the", "# adb shell am start -n com.package.name/com.package.name.ActivityName logger.debug(\"Starting activity [%s] of the package", "activity [%s] of the package [%s]...\" % (activity_name, package_name)) run_string = package_name +", "logger.debug(\"Starting activity [%s] of the package [%s]...\" % (activity_name, package_name)) run_string = package_name", "int(request_pipe(cmd)) return api_level def run_monkey(package, seed, throttle, event_num): cmd = 'adb shell monkey", "raise ErrorInstallingException if 'Exception occurred while dumping' in out: raise ErrorUninstallingException def uninstall(package):", "res.decode('utf-8') def start_activity_explicitly(package_name, activity_name): # adb shell am start -n com.package.name/com.package.name.ActivityName logger.debug(\"Starting activity", "-d *:E > {1}\".format(config.ADB_PATH, path) request_pipe(cmd) def save_log(logs_dir, app): file_path = os.path.join(logs_dir, \"{}.txt\".format(app))", "AbsentPackageException, ErrorInstallingException, ErrorUninstallingException, NotEnoughSpaceException import os def install(new_apk_path): cmd = '\"{}\" install -r", "except Exception: raise ErrorUninstallingException def request_pipe(cmd): pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) out,", "dumping' in out: raise ErrorUninstallingException def uninstall(package): cmd = '\"{}\" uninstall \"{}\"'.format(config.ADB_PATH, package)", "seed, throttle, event_num): cmd = 'adb shell monkey -p {} -s {} --throttle", "str(e): raise NotEnoughSpaceException() raise ErrorInstallingException if 'Exception occurred while dumping' in out: 
raise", "\"{}.txt\".format(app)) dump_log(file_path) return file_path def read_log(path): with open(path, 'r') as file: data =", "not out: res = err if pipe.returncode > 0 : raise Exception(\"----------------------------------------------------\\n\\ Out:", "ErrorUninstallingException def request_pipe(cmd): pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) out, err = pipe.communicate()", "ErrorInstallingException if 'Exception occurred while dumping' in out: raise ErrorUninstallingException def uninstall(package): cmd", "%s\\nError: %s\" % (out, err)) return res.decode('utf-8') def start_activity_explicitly(package_name, activity_name): # adb shell", "getprop ro.build.version.sdk\".format(config.ADB_PATH) api_level = int(request_pipe(cmd)) return api_level def run_monkey(package, seed, throttle, event_num): cmd", "return api_level def run_monkey(package, seed, throttle, event_num): cmd = 'adb shell monkey -p", ": raise Exception(\"----------------------------------------------------\\n\\ Out: %s\\nError: %s\" % (out, err)) return res.decode('utf-8') def start_activity_explicitly(package_name,", "= err if pipe.returncode > 0 : raise Exception(\"----------------------------------------------------\\n\\ Out: %s\\nError: %s\" %", "% (activity_name, package_name)) run_string = package_name + '/' + activity_name cmd = \"{0}", "'r') as file: data = file.read() return data def get_api_level(): cmd = \"{}", "new_apk_path) try: out = request_pipe(cmd) except Exception as e: if 'not enough space'", "0 : raise Exception(\"----------------------------------------------------\\n\\ Out: %s\\nError: %s\" % (out, err)) return res.decode('utf-8') def", "[%s]...\" % (activity_name, package_name)) run_string = package_name + '/' + activity_name cmd =", "= package_name + '/' + activity_name cmd = \"{0} shell am start -n", "cmd = \"{0} logcat -d *:E > {1}\".format(config.ADB_PATH, path) request_pipe(cmd) def save_log(logs_dir, app):", "os.path.join(logs_dir, \"{}.txt\".format(app)) dump_log(file_path) return file_path def read_log(path): with open(path, 'r') as file: data", "import config from modules.exceptions import AbsentPackageException, ErrorInstallingException, ErrorUninstallingException, NotEnoughSpaceException import os def install(new_apk_path):", "shell getprop ro.build.version.sdk\".format(config.ADB_PATH) api_level = int(request_pipe(cmd)) return api_level def run_monkey(package, seed, throttle, event_num):", "def run_monkey(package, seed, throttle, event_num): cmd = 'adb shell monkey -p {} -s", "if not out: res = err if pipe.returncode > 0 : raise Exception(\"----------------------------------------------------\\n\\", "out: res = err if pipe.returncode > 0 : raise Exception(\"----------------------------------------------------\\n\\ Out: %s\\nError:", "in str(e): raise NotEnoughSpaceException() raise ErrorInstallingException if 'Exception occurred while dumping' in out:", "(activity_name, package_name)) run_string = package_name + '/' + activity_name cmd = \"{0} shell", "res = err if pipe.returncode > 0 : raise Exception(\"----------------------------------------------------\\n\\ Out: %s\\nError: %s\"", "request_pipe(cmd) def dump_log(path): cmd = \"{0} logcat -d *:E > {1}\".format(config.ADB_PATH, path) request_pipe(cmd)", "am start -n {1}\".format(config.ADB_PATH, run_string) request_pipe(cmd) def clean_log(): cmd = \"{0} logcat -c\".format(config.ADB_PATH)", "the package [%s]...\" % (activity_name, package_name)) run_string = package_name + '/' + 
activity_name", "config from modules.exceptions import AbsentPackageException, ErrorInstallingException, ErrorUninstallingException, NotEnoughSpaceException import os def install(new_apk_path): cmd", "return res.decode('utf-8') def start_activity_explicitly(package_name, activity_name): # adb shell am start -n com.package.name/com.package.name.ActivityName logger.debug(\"Starting", "dump_log(file_path) return file_path def read_log(path): with open(path, 'r') as file: data = file.read()", "'not enough space' in str(e): raise NotEnoughSpaceException() raise ErrorInstallingException if 'Exception occurred while", "import AbsentPackageException, ErrorInstallingException, ErrorUninstallingException, NotEnoughSpaceException import os def install(new_apk_path): cmd = '\"{}\" install", "shell am start -n com.package.name/com.package.name.ActivityName logger.debug(\"Starting activity [%s] of the package [%s]...\" %", "enough space' in str(e): raise NotEnoughSpaceException() raise ErrorInstallingException if 'Exception occurred while dumping'", "request_pipe(cmd) except Exception: raise ErrorUninstallingException def request_pipe(cmd): pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)", "am start -n com.package.name/com.package.name.ActivityName logger.debug(\"Starting activity [%s] of the package [%s]...\" % (activity_name,", "clean_log(): cmd = \"{0} logcat -c\".format(config.ADB_PATH) request_pipe(cmd) def dump_log(path): cmd = \"{0} logcat", "cmd = \"{0} shell am start -n {1}\".format(config.ADB_PATH, run_string) request_pipe(cmd) def clean_log(): cmd", "shell monkey -p {} -s {} --throttle {} {}' request_pipe(cmd.format(package, seed, throttle, event_num))", "e: if 'not enough space' in str(e): raise NotEnoughSpaceException() raise ErrorInstallingException if 'Exception", "adb shell am start -n com.package.name/com.package.name.ActivityName logger.debug(\"Starting activity [%s] of the package [%s]...\"", "package_name)) run_string = package_name + '/' + activity_name cmd = \"{0} shell am", "= \"{0} logcat -d *:E > {1}\".format(config.ADB_PATH, path) request_pipe(cmd) def save_log(logs_dir, app): file_path", "file_path def read_log(path): with open(path, 'r') as file: data = file.read() return data", "get_api_level(): cmd = \"{} shell getprop ro.build.version.sdk\".format(config.ADB_PATH) api_level = int(request_pipe(cmd)) return api_level def", "+ activity_name cmd = \"{0} shell am start -n {1}\".format(config.ADB_PATH, run_string) request_pipe(cmd) def", "api_level = int(request_pipe(cmd)) return api_level def run_monkey(package, seed, throttle, event_num): cmd = 'adb", "read_log(path): with open(path, 'r') as file: data = file.read() return data def get_api_level():", "ErrorUninstallingException, NotEnoughSpaceException import os def install(new_apk_path): cmd = '\"{}\" install -r \"{}\"'.format(config.ADB_PATH, new_apk_path)", "as file: data = file.read() return data def get_api_level(): cmd = \"{} shell", "data = file.read() return data def get_api_level(): cmd = \"{} shell getprop ro.build.version.sdk\".format(config.ADB_PATH)", "Out: %s\\nError: %s\" % (out, err)) return res.decode('utf-8') def start_activity_explicitly(package_name, activity_name): # adb", "logger import subprocess from modules import config from modules.exceptions import AbsentPackageException, ErrorInstallingException, ErrorUninstallingException,", "+ '/' + activity_name cmd = \"{0} shell am start -n {1}\".format(config.ADB_PATH, run_string)", "= subprocess.Popen(cmd, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) out, err = pipe.communicate() res = out if", "try: request_pipe(cmd) except Exception: raise ErrorUninstallingException def request_pipe(cmd): pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,", "return file_path def read_log(path): with open(path, 'r') as file: data = file.read() return", "= \"{0} shell am start -n {1}\".format(config.ADB_PATH, run_string) request_pipe(cmd) def clean_log(): cmd =", "= int(request_pipe(cmd)) return api_level def run_monkey(package, seed, throttle, event_num): cmd = 'adb shell", "def request_pipe(cmd): pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) out, err = pipe.communicate() res", "out, err = pipe.communicate() res = out if not out: res = err", "= out if not out: res = err if pipe.returncode > 0 :", "= '\"{}\" install -r \"{}\"'.format(config.ADB_PATH, new_apk_path) try: out = request_pipe(cmd) except Exception as", "(out, err)) return res.decode('utf-8') def start_activity_explicitly(package_name, activity_name): # adb shell am start -n", "import subprocess from modules import config from modules.exceptions import AbsentPackageException, ErrorInstallingException, ErrorUninstallingException, NotEnoughSpaceException", "'/' + activity_name cmd = \"{0} shell am start -n {1}\".format(config.ADB_PATH, run_string) request_pipe(cmd)", "with open(path, 'r') as file: data = file.read() return data def get_api_level(): cmd", "'\"{}\" install -r \"{}\"'.format(config.ADB_PATH, new_apk_path) try: out = request_pipe(cmd) except Exception as e:", "stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) out, err = pipe.communicate() res = out if not out:", "\"{0} shell am start -n {1}\".format(config.ADB_PATH, run_string) request_pipe(cmd) def clean_log(): cmd = \"{0}", "return data def get_api_level(): cmd = \"{} shell getprop ro.build.version.sdk\".format(config.ADB_PATH) api_level = int(request_pipe(cmd))", "= \"{} shell getprop ro.build.version.sdk\".format(config.ADB_PATH) api_level = int(request_pipe(cmd)) return api_level def run_monkey(package, seed,", "Exception: raise ErrorUninstallingException def request_pipe(cmd): pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) out, err", "modules.exceptions import AbsentPackageException, ErrorInstallingException, ErrorUninstallingException, NotEnoughSpaceException import os def install(new_apk_path): cmd = '\"{}\"", "\"{}\"'.format(config.ADB_PATH, package) try: request_pipe(cmd) except Exception: raise ErrorUninstallingException def request_pipe(cmd): pipe = subprocess.Popen(cmd,", "install -r \"{}\"'.format(config.ADB_PATH, new_apk_path) try: out = request_pipe(cmd) except Exception as e: if", "import logger import subprocess from modules import config from modules.exceptions import AbsentPackageException, ErrorInstallingException,", "<filename>modules/shellhelper.py from loguru import logger import subprocess from modules import config from modules.exceptions", "= request_pipe(cmd) except Exception as e: if 'not enough space' in str(e): raise", "{1}\".format(config.ADB_PATH, run_string) request_pipe(cmd) def clean_log(): cmd = \"{0} logcat -c\".format(config.ADB_PATH) request_pipe(cmd) def dump_log(path):", "from modules import config from modules.exceptions import AbsentPackageException, ErrorInstallingException, ErrorUninstallingException, NotEnoughSpaceException import os", "request_pipe(cmd): pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, 
stderr=subprocess.PIPE, shell=True) out, err = pipe.communicate() res =", "cmd = 'adb shell monkey -p {} -s {} --throttle {} {}' request_pipe(cmd.format(package,", "dump_log(path): cmd = \"{0} logcat -d *:E > {1}\".format(config.ADB_PATH, path) request_pipe(cmd) def save_log(logs_dir,", "'Exception occurred while dumping' in out: raise ErrorUninstallingException def uninstall(package): cmd = '\"{}\"", "'\"{}\" uninstall \"{}\"'.format(config.ADB_PATH, package) try: request_pipe(cmd) except Exception: raise ErrorUninstallingException def request_pipe(cmd): pipe", "import os def install(new_apk_path): cmd = '\"{}\" install -r \"{}\"'.format(config.ADB_PATH, new_apk_path) try: out", "run_monkey(package, seed, throttle, event_num): cmd = 'adb shell monkey -p {} -s {}", "open(path, 'r') as file: data = file.read() return data def get_api_level(): cmd =", "NotEnoughSpaceException import os def install(new_apk_path): cmd = '\"{}\" install -r \"{}\"'.format(config.ADB_PATH, new_apk_path) try:", "cmd = '\"{}\" install -r \"{}\"'.format(config.ADB_PATH, new_apk_path) try: out = request_pipe(cmd) except Exception", "res = out if not out: res = err if pipe.returncode > 0", "\"{} shell getprop ro.build.version.sdk\".format(config.ADB_PATH) api_level = int(request_pipe(cmd)) return api_level def run_monkey(package, seed, throttle,", "-n com.package.name/com.package.name.ActivityName logger.debug(\"Starting activity [%s] of the package [%s]...\" % (activity_name, package_name)) run_string", "{1}\".format(config.ADB_PATH, path) request_pipe(cmd) def save_log(logs_dir, app): file_path = os.path.join(logs_dir, \"{}.txt\".format(app)) dump_log(file_path) return file_path", "uninstall(package): cmd = '\"{}\" uninstall \"{}\"'.format(config.ADB_PATH, package) try: request_pipe(cmd) except Exception: raise ErrorUninstallingException", "Exception(\"----------------------------------------------------\\n\\ Out: %s\\nError: %s\" % (out, err)) return res.decode('utf-8') def start_activity_explicitly(package_name, activity_name): #", "uninstall \"{}\"'.format(config.ADB_PATH, package) try: request_pipe(cmd) except Exception: raise ErrorUninstallingException def request_pipe(cmd): pipe =", "logcat -c\".format(config.ADB_PATH) request_pipe(cmd) def dump_log(path): cmd = \"{0} logcat -d *:E > {1}\".format(config.ADB_PATH,", "ro.build.version.sdk\".format(config.ADB_PATH) api_level = int(request_pipe(cmd)) return api_level def run_monkey(package, seed, throttle, event_num): cmd =", "= pipe.communicate() res = out if not out: res = err if pipe.returncode", "from modules.exceptions import AbsentPackageException, ErrorInstallingException, ErrorUninstallingException, NotEnoughSpaceException import os def install(new_apk_path): cmd =", "install(new_apk_path): cmd = '\"{}\" install -r \"{}\"'.format(config.ADB_PATH, new_apk_path) try: out = request_pipe(cmd) except", "while dumping' in out: raise ErrorUninstallingException def uninstall(package): cmd = '\"{}\" uninstall \"{}\"'.format(config.ADB_PATH,", "loguru import logger import subprocess from modules import config from modules.exceptions import AbsentPackageException,", "def get_api_level(): cmd = \"{} shell getprop ro.build.version.sdk\".format(config.ADB_PATH) api_level = int(request_pipe(cmd)) return api_level", "if pipe.returncode > 0 : raise Exception(\"----------------------------------------------------\\n\\ Out: %s\\nError: %s\" % (out, err))", "def install(new_apk_path): cmd = '\"{}\" install -r \"{}\"'.format(config.ADB_PATH, new_apk_path) try: out = 
request_pipe(cmd)", "> 0 : raise Exception(\"----------------------------------------------------\\n\\ Out: %s\\nError: %s\" % (out, err)) return res.decode('utf-8')", "save_log(logs_dir, app): file_path = os.path.join(logs_dir, \"{}.txt\".format(app)) dump_log(file_path) return file_path def read_log(path): with open(path,", "\"{0} logcat -d *:E > {1}\".format(config.ADB_PATH, path) request_pipe(cmd) def save_log(logs_dir, app): file_path =", "start -n {1}\".format(config.ADB_PATH, run_string) request_pipe(cmd) def clean_log(): cmd = \"{0} logcat -c\".format(config.ADB_PATH) request_pipe(cmd)", "*:E > {1}\".format(config.ADB_PATH, path) request_pipe(cmd) def save_log(logs_dir, app): file_path = os.path.join(logs_dir, \"{}.txt\".format(app)) dump_log(file_path)", "activity_name cmd = \"{0} shell am start -n {1}\".format(config.ADB_PATH, run_string) request_pipe(cmd) def clean_log():", "-r \"{}\"'.format(config.ADB_PATH, new_apk_path) try: out = request_pipe(cmd) except Exception as e: if 'not", "logcat -d *:E > {1}\".format(config.ADB_PATH, path) request_pipe(cmd) def save_log(logs_dir, app): file_path = os.path.join(logs_dir,", "out = request_pipe(cmd) except Exception as e: if 'not enough space' in str(e):", "err)) return res.decode('utf-8') def start_activity_explicitly(package_name, activity_name): # adb shell am start -n com.package.name/com.package.name.ActivityName", "start_activity_explicitly(package_name, activity_name): # adb shell am start -n com.package.name/com.package.name.ActivityName logger.debug(\"Starting activity [%s] of", "pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) out, err = pipe.communicate() res = out", "def start_activity_explicitly(package_name, activity_name): # adb shell am start -n com.package.name/com.package.name.ActivityName logger.debug(\"Starting activity [%s]", "api_level def run_monkey(package, seed, throttle, event_num): cmd = 'adb shell monkey -p {}", "stderr=subprocess.PIPE, shell=True) out, err = pipe.communicate() res = out if not out: res", "start -n com.package.name/com.package.name.ActivityName logger.debug(\"Starting activity [%s] of the package [%s]...\" % (activity_name, package_name))", "> {1}\".format(config.ADB_PATH, path) request_pipe(cmd) def save_log(logs_dir, app): file_path = os.path.join(logs_dir, \"{}.txt\".format(app)) dump_log(file_path) return", "raise ErrorUninstallingException def request_pipe(cmd): pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) out, err =", "pipe.communicate() res = out if not out: res = err if pipe.returncode >", "space' in str(e): raise NotEnoughSpaceException() raise ErrorInstallingException if 'Exception occurred while dumping' in", "Exception as e: if 'not enough space' in str(e): raise NotEnoughSpaceException() raise ErrorInstallingException", "occurred while dumping' in out: raise ErrorUninstallingException def uninstall(package): cmd = '\"{}\" uninstall", "file: data = file.read() return data def get_api_level(): cmd = \"{} shell getprop", "path) request_pipe(cmd) def save_log(logs_dir, app): file_path = os.path.join(logs_dir, \"{}.txt\".format(app)) dump_log(file_path) return file_path def", "ErrorUninstallingException def uninstall(package): cmd = '\"{}\" uninstall \"{}\"'.format(config.ADB_PATH, package) try: request_pipe(cmd) except Exception:", "NotEnoughSpaceException() raise ErrorInstallingException if 'Exception occurred while dumping' in out: raise ErrorUninstallingException def", "def save_log(logs_dir, 
app): file_path = os.path.join(logs_dir, \"{}.txt\".format(app)) dump_log(file_path) return file_path def read_log(path): with", "run_string = package_name + '/' + activity_name cmd = \"{0} shell am start", "if 'Exception occurred while dumping' in out: raise ErrorUninstallingException def uninstall(package): cmd =", "from loguru import logger import subprocess from modules import config from modules.exceptions import", "%s\" % (out, err)) return res.decode('utf-8') def start_activity_explicitly(package_name, activity_name): # adb shell am", "com.package.name/com.package.name.ActivityName logger.debug(\"Starting activity [%s] of the package [%s]...\" % (activity_name, package_name)) run_string =", "raise Exception(\"----------------------------------------------------\\n\\ Out: %s\\nError: %s\" % (out, err)) return res.decode('utf-8') def start_activity_explicitly(package_name, activity_name):", "cmd = '\"{}\" uninstall \"{}\"'.format(config.ADB_PATH, package) try: request_pipe(cmd) except Exception: raise ErrorUninstallingException def", "subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) out, err = pipe.communicate() res = out if not", "request_pipe(cmd) def save_log(logs_dir, app): file_path = os.path.join(logs_dir, \"{}.txt\".format(app)) dump_log(file_path) return file_path def read_log(path):", "out: raise ErrorUninstallingException def uninstall(package): cmd = '\"{}\" uninstall \"{}\"'.format(config.ADB_PATH, package) try: request_pipe(cmd)", "data def get_api_level(): cmd = \"{} shell getprop ro.build.version.sdk\".format(config.ADB_PATH) api_level = int(request_pipe(cmd)) return", "def clean_log(): cmd = \"{0} logcat -c\".format(config.ADB_PATH) request_pipe(cmd) def dump_log(path): cmd = \"{0}", "shell am start -n {1}\".format(config.ADB_PATH, run_string) request_pipe(cmd) def clean_log(): cmd = \"{0} logcat", "= file.read() return data def get_api_level(): cmd = \"{} shell getprop ro.build.version.sdk\".format(config.ADB_PATH) api_level" ]
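# A short usage sketch showing how the helpers above are typically chained.
# The APK path, package and activity names are placeholders, and
# modules.config.ADB_PATH is assumed to point at a working adb binary.
import os
from modules import shellhelper

if __name__ == "__main__":
    print("Device API level:", shellhelper.get_api_level())
    shellhelper.install("app-debug.apk")          # adb install -r <apk>
    shellhelper.clean_log()                       # clear logcat before the run
    shellhelper.start_activity_explicitly("com.example.app", "com.example.app.MainActivity")
    shellhelper.run_monkey("com.example.app", seed=42, throttle=100, event_num=500)
    os.makedirs("logs", exist_ok=True)
    log_file = shellhelper.save_log("logs", "com.example.app")   # dump *:E logcat output
    print(shellhelper.read_log(log_file))
    shellhelper.uninstall("com.example.app")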
[ "= K.Model(input_tensor, output_tensor) model.compile(optimizer=\"sgd\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]) # Train model if lazy: model.fit(x=train, epochs=epochs,", "import FLIRDataset def grid_search(train_labels: str, test_labels: str, output:str, res:tuple=(120, 160), lazy:bool=True, batch_size:int=16, epochs:int=20):", "Path to output directory res: tuple Input resolution of network lazy: bool Whether", "str, output:str, res:tuple=(120, 160), lazy:bool=True, batch_size:int=16, epochs:int=20): \"\"\" Runs a grid search over", "resolution of network lazy: bool Whether to load data lazily in batches during", "in batches during training batch_size: int Batch size in case of lazy loading", "batch_size: int Batch size in case of lazy loading epochs: int Training epochs", "In eager loading mode, train on everything. if not lazy: X_train, y_train =", "on everything. if not lazy: X_train, y_train = train.get_all() X_test, y_test = test.get_all()", "import classification_report from dataset import FLIRDataset def grid_search(train_labels: str, test_labels: str, output:str, res:tuple=(120,", "if not lazy: X_train, y_train = train.get_all() X_test, y_test = test.get_all() X_train =", "net(input_tensor, num_classes=train.num_classes()) model = K.Model(input_tensor, output_tensor) model.compile(optimizer=\"sgd\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]) # Train model if", "help=\"Directory containing training labels\") parser.add_argument(\"test\", help=\"Directory containing testing labels\") parser.add_argument(\"out\", help=\"Output directory for", "validation_data=train, verbose=2) else: model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size, verbose=2) # Save weights model.save_weights(os.path.join(output, \"flir_pretrained_weights.h5\"))", "axis=0) y_train = np.concatenate([y_train, y_test], axis=0) def net(x, num_classes=1): x = K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None,", "lazy loading epochs: int Training epochs \"\"\" # Data print(\"=> Loading data.\") train", "model.compile(optimizer=\"sgd\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]) # Train model if lazy: model.fit(x=train, epochs=epochs, validation_data=train, verbose=2) else:", "test = FLIRDataset(test_labels, res=res, batch_size=batch_size) # In eager loading mode, train on everything.", "epochs=epochs, validation_data=train, verbose=2) else: model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size, verbose=2) # Save weights model.save_weights(os.path.join(output,", "argparse import numpy as np import tensorflow as tf import tensorflow.keras as K", "= FLIRDataset(test_labels, res=res, batch_size=batch_size) # In eager loading mode, train on everything. if", "\"\"\" Runs a grid search over all known models. 
Params ------ train_labels: str", "Input resolution of network lazy: bool Whether to load data lazily in batches", "Batch size in case of lazy loading epochs: int Training epochs \"\"\" #", "containing training labels\") parser.add_argument(\"test\", help=\"Directory containing testing labels\") parser.add_argument(\"out\", help=\"Output directory for results\")", "sklearn.metrics import classification_report from dataset import FLIRDataset def grid_search(train_labels: str, test_labels: str, output:str,", "epochs \"\"\" # Data print(\"=> Loading data.\") train = FLIRDataset(train_labels, res=res, batch_size=batch_size) test", "batch_size=batch_size) test = FLIRDataset(test_labels, res=res, batch_size=batch_size) # In eager loading mode, train on", "tf import tensorflow.keras as K from sklearn.metrics import classification_report from dataset import FLIRDataset", "from sklearn.metrics import classification_report from dataset import FLIRDataset def grid_search(train_labels: str, test_labels: str,", "X_test], axis=0) y_train = np.concatenate([y_train, y_test], axis=0) def net(x, num_classes=1): x = K.applications.resnet_v2.ResNet50V2(include_top=False,", "Runs a grid search over all known models. Params ------ train_labels: str Path", "# Train model if lazy: model.fit(x=train, epochs=epochs, validation_data=train, verbose=2) else: model.fit(x=X_train, y=y_train, epochs=epochs,", "Training model.\") input_tensor = K.layers.Input((160, 120, 1)) output_tensor = net(input_tensor, num_classes=train.num_classes()) model =", "model.fit(x=train, epochs=epochs, validation_data=train, verbose=2) else: model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size, verbose=2) # Save weights", "= K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x) x = K.layers.Flatten()(x) x = K.layers.Dense(num_classes, activation=\"softmax\")(x) return x", "x = K.layers.Dense(num_classes, activation=\"softmax\")(x) return x print(\"\\n=> Training model.\") input_tensor = K.layers.Input((160, 120,", "known models. Params ------ train_labels: str Path to training labels test_labels: str Path", "K.layers.Flatten()(x) x = K.layers.Dense(num_classes, activation=\"softmax\")(x) return x print(\"\\n=> Training model.\") input_tensor = K.layers.Input((160,", "K.layers.Dense(num_classes, activation=\"softmax\")(x) return x print(\"\\n=> Training model.\") input_tensor = K.layers.Input((160, 120, 1)) output_tensor", "test.get_all() X_train = np.concatenate([X_train, X_test], axis=0) y_train = np.concatenate([y_train, y_test], axis=0) def net(x,", "output:str, res:tuple=(120, 160), lazy:bool=True, batch_size:int=16, epochs:int=20): \"\"\" Runs a grid search over all", "lazy:bool=True, batch_size:int=16, epochs:int=20): \"\"\" Runs a grid search over all known models. Params", "int Training epochs \"\"\" # Data print(\"=> Loading data.\") train = FLIRDataset(train_labels, res=res,", "batch_size=batch_size) # In eager loading mode, train on everything. if not lazy: X_train,", "eager loading mode, train on everything. 
if not lazy: X_train, y_train = train.get_all()", "Data print(\"=> Loading data.\") train = FLIRDataset(train_labels, res=res, batch_size=batch_size) test = FLIRDataset(test_labels, res=res,", "= argparse.ArgumentParser(description=\"Train model on FLIR dataset.\") parser.add_argument(\"train\", help=\"Directory containing training labels\") parser.add_argument(\"test\", help=\"Directory", "import tensorflow as tf import tensorflow.keras as K from sklearn.metrics import classification_report from", "str Path to testing labels output: str Path to output directory res: tuple", "\"\"\" # Data print(\"=> Loading data.\") train = FLIRDataset(train_labels, res=res, batch_size=batch_size) test =", "Whether to load data lazily in batches during training batch_size: int Batch size", "# Data print(\"=> Loading data.\") train = FLIRDataset(train_labels, res=res, batch_size=batch_size) test = FLIRDataset(test_labels,", "everything. if not lazy: X_train, y_train = train.get_all() X_test, y_test = test.get_all() X_train", "num_classes=1): x = K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x) x = K.layers.Flatten()(x) x = K.layers.Dense(num_classes, activation=\"softmax\")(x)", "verbose=2) # Save weights model.save_weights(os.path.join(output, \"flir_pretrained_weights.h5\")) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Train", "\"__main__\": parser = argparse.ArgumentParser(description=\"Train model on FLIR dataset.\") parser.add_argument(\"train\", help=\"Directory containing training labels\")", "testing labels\") parser.add_argument(\"out\", help=\"Output directory for results\") parser.add_argument(\"epochs\", help=\"Number of epochs\") parser.add_argument(\"-l\", \"--lazy\",", "K.layers.Input((160, 120, 1)) output_tensor = net(input_tensor, num_classes=train.num_classes()) model = K.Model(input_tensor, output_tensor) model.compile(optimizer=\"sgd\", loss=\"categorical_crossentropy\",", "data.\") train = FLIRDataset(train_labels, res=res, batch_size=batch_size) test = FLIRDataset(test_labels, res=res, batch_size=batch_size) # In", "K.Model(input_tensor, output_tensor) model.compile(optimizer=\"sgd\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]) # Train model if lazy: model.fit(x=train, epochs=epochs, validation_data=train,", "weights=None, input_shape=x.shape[1:])(x) x = K.layers.Flatten()(x) x = K.layers.Dense(num_classes, activation=\"softmax\")(x) return x print(\"\\n=> Training", "loading epochs: int Training epochs \"\"\" # Data print(\"=> Loading data.\") train =", "= K.layers.Input((160, 120, 1)) output_tensor = net(input_tensor, num_classes=train.num_classes()) model = K.Model(input_tensor, output_tensor) model.compile(optimizer=\"sgd\",", "120, 1)) output_tensor = net(input_tensor, num_classes=train.num_classes()) model = K.Model(input_tensor, output_tensor) model.compile(optimizer=\"sgd\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])", "output_tensor = net(input_tensor, num_classes=train.num_classes()) model = K.Model(input_tensor, output_tensor) model.compile(optimizer=\"sgd\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]) # Train", "parser.add_argument(\"test\", help=\"Directory containing testing labels\") parser.add_argument(\"out\", help=\"Output directory for results\") parser.add_argument(\"epochs\", help=\"Number of", "y_test], axis=0) def net(x, num_classes=1): x = K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x) x = 
K.layers.Flatten()(x)", "tensorflow.keras as K from sklearn.metrics import classification_report from dataset import FLIRDataset def grid_search(train_labels:", "print(\"\\n=> Training model.\") input_tensor = K.layers.Input((160, 120, 1)) output_tensor = net(input_tensor, num_classes=train.num_classes()) model", "argparse.ArgumentParser(description=\"Train model on FLIR dataset.\") parser.add_argument(\"train\", help=\"Directory containing training labels\") parser.add_argument(\"test\", help=\"Directory containing", "action=\"store_true\") args = vars(parser.parse_args()) grid_search(args[\"train\"], args[\"test\"], args[\"out\"], res=(120, 160), lazy=bool(args[\"lazy\"]), epochs=int(args[\"epochs\"])) print(\"\\n=> Finished.\")", "training labels\") parser.add_argument(\"test\", help=\"Directory containing testing labels\") parser.add_argument(\"out\", help=\"Output directory for results\") parser.add_argument(\"epochs\",", "\"--lazy\", dest=\"lazy\", help=\"Load data lazily\", action=\"store_true\") args = vars(parser.parse_args()) grid_search(args[\"train\"], args[\"test\"], args[\"out\"], res=(120,", "res: tuple Input resolution of network lazy: bool Whether to load data lazily", "y_test = test.get_all() X_train = np.concatenate([X_train, X_test], axis=0) y_train = np.concatenate([y_train, y_test], axis=0)", "Params ------ train_labels: str Path to training labels test_labels: str Path to testing", "size in case of lazy loading epochs: int Training epochs \"\"\" # Data", "in case of lazy loading epochs: int Training epochs \"\"\" # Data print(\"=>", "axis=0) def net(x, num_classes=1): x = K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x) x = K.layers.Flatten()(x) x", "weights model.save_weights(os.path.join(output, \"flir_pretrained_weights.h5\")) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Train model on FLIR", "lazy: X_train, y_train = train.get_all() X_test, y_test = test.get_all() X_train = np.concatenate([X_train, X_test],", "parser.add_argument(\"train\", help=\"Directory containing training labels\") parser.add_argument(\"test\", help=\"Directory containing testing labels\") parser.add_argument(\"out\", help=\"Output directory", "import argparse import numpy as np import tensorflow as tf import tensorflow.keras as", "grid search over all known models. Params ------ train_labels: str Path to training", "= np.concatenate([X_train, X_test], axis=0) y_train = np.concatenate([y_train, y_test], axis=0) def net(x, num_classes=1): x", "input_shape=x.shape[1:])(x) x = K.layers.Flatten()(x) x = K.layers.Dense(num_classes, activation=\"softmax\")(x) return x print(\"\\n=> Training model.\")", "loading mode, train on everything. if not lazy: X_train, y_train = train.get_all() X_test,", "lazy: bool Whether to load data lazily in batches during training batch_size: int", "lazily\", action=\"store_true\") args = vars(parser.parse_args()) grid_search(args[\"train\"], args[\"test\"], args[\"out\"], res=(120, 160), lazy=bool(args[\"lazy\"]), epochs=int(args[\"epochs\"])) print(\"\\n=>", "batch_size:int=16, epochs:int=20): \"\"\" Runs a grid search over all known models. 
Params ------", "tensorflow as tf import tensorflow.keras as K from sklearn.metrics import classification_report from dataset", "containing testing labels\") parser.add_argument(\"out\", help=\"Output directory for results\") parser.add_argument(\"epochs\", help=\"Number of epochs\") parser.add_argument(\"-l\",", "model on FLIR dataset.\") parser.add_argument(\"train\", help=\"Directory containing training labels\") parser.add_argument(\"test\", help=\"Directory containing testing", "labels output: str Path to output directory res: tuple Input resolution of network", "help=\"Load data lazily\", action=\"store_true\") args = vars(parser.parse_args()) grid_search(args[\"train\"], args[\"test\"], args[\"out\"], res=(120, 160), lazy=bool(args[\"lazy\"]),", "train on everything. if not lazy: X_train, y_train = train.get_all() X_test, y_test =", "y=y_train, epochs=epochs, batch_size=batch_size, verbose=2) # Save weights model.save_weights(os.path.join(output, \"flir_pretrained_weights.h5\")) if __name__ == \"__main__\":", "parser.add_argument(\"out\", help=\"Output directory for results\") parser.add_argument(\"epochs\", help=\"Number of epochs\") parser.add_argument(\"-l\", \"--lazy\", dest=\"lazy\", help=\"Load", "for results\") parser.add_argument(\"epochs\", help=\"Number of epochs\") parser.add_argument(\"-l\", \"--lazy\", dest=\"lazy\", help=\"Load data lazily\", action=\"store_true\")", "K from sklearn.metrics import classification_report from dataset import FLIRDataset def grid_search(train_labels: str, test_labels:", "numpy as np import tensorflow as tf import tensorflow.keras as K from sklearn.metrics", "Loading data.\") train = FLIRDataset(train_labels, res=res, batch_size=batch_size) test = FLIRDataset(test_labels, res=res, batch_size=batch_size) #", "__name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Train model on FLIR dataset.\") parser.add_argument(\"train\", help=\"Directory containing", "net(x, num_classes=1): x = K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x) x = K.layers.Flatten()(x) x = K.layers.Dense(num_classes,", "batches during training batch_size: int Batch size in case of lazy loading epochs:", "x = K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x) x = K.layers.Flatten()(x) x = K.layers.Dense(num_classes, activation=\"softmax\")(x) return", "model.save_weights(os.path.join(output, \"flir_pretrained_weights.h5\")) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Train model on FLIR dataset.\")", "help=\"Directory containing testing labels\") parser.add_argument(\"out\", help=\"Output directory for results\") parser.add_argument(\"epochs\", help=\"Number of epochs\")", "not lazy: X_train, y_train = train.get_all() X_test, y_test = test.get_all() X_train = np.concatenate([X_train,", "metrics=[\"accuracy\"]) # Train model if lazy: model.fit(x=train, epochs=epochs, validation_data=train, verbose=2) else: model.fit(x=X_train, y=y_train,", "test_labels: str Path to testing labels output: str Path to output directory res:", "data lazily in batches during training batch_size: int Batch size in case of", "of network lazy: bool Whether to load data lazily in batches during training", "activation=\"softmax\")(x) return x print(\"\\n=> Training model.\") input_tensor = K.layers.Input((160, 120, 1)) output_tensor =", "np.concatenate([y_train, y_test], axis=0) def net(x, num_classes=1): x = 
K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x) x =", "labels\") parser.add_argument(\"test\", help=\"Directory containing testing labels\") parser.add_argument(\"out\", help=\"Output directory for results\") parser.add_argument(\"epochs\", help=\"Number", "------ train_labels: str Path to training labels test_labels: str Path to testing labels", "= np.concatenate([y_train, y_test], axis=0) def net(x, num_classes=1): x = K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x) x", "x = K.layers.Flatten()(x) x = K.layers.Dense(num_classes, activation=\"softmax\")(x) return x print(\"\\n=> Training model.\") input_tensor", "test_labels: str, output:str, res:tuple=(120, 160), lazy:bool=True, batch_size:int=16, epochs:int=20): \"\"\" Runs a grid search", "from dataset import FLIRDataset def grid_search(train_labels: str, test_labels: str, output:str, res:tuple=(120, 160), lazy:bool=True,", "Training epochs \"\"\" # Data print(\"=> Loading data.\") train = FLIRDataset(train_labels, res=res, batch_size=batch_size)", "= K.layers.Dense(num_classes, activation=\"softmax\")(x) return x print(\"\\n=> Training model.\") input_tensor = K.layers.Input((160, 120, 1))", "Path to testing labels output: str Path to output directory res: tuple Input", "parser.add_argument(\"epochs\", help=\"Number of epochs\") parser.add_argument(\"-l\", \"--lazy\", dest=\"lazy\", help=\"Load data lazily\", action=\"store_true\") args =", "FLIRDataset def grid_search(train_labels: str, test_labels: str, output:str, res:tuple=(120, 160), lazy:bool=True, batch_size:int=16, epochs:int=20): \"\"\"", "epochs:int=20): \"\"\" Runs a grid search over all known models. Params ------ train_labels:", "num_classes=train.num_classes()) model = K.Model(input_tensor, output_tensor) model.compile(optimizer=\"sgd\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]) # Train model if lazy:", "batch_size=batch_size, verbose=2) # Save weights model.save_weights(os.path.join(output, \"flir_pretrained_weights.h5\")) if __name__ == \"__main__\": parser =", "lazily in batches during training batch_size: int Batch size in case of lazy", "case of lazy loading epochs: int Training epochs \"\"\" # Data print(\"=> Loading", "if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Train model on FLIR dataset.\") parser.add_argument(\"train\", help=\"Directory", "np.concatenate([X_train, X_test], axis=0) y_train = np.concatenate([y_train, y_test], axis=0) def net(x, num_classes=1): x =", "output: str Path to output directory res: tuple Input resolution of network lazy:", "model = K.Model(input_tensor, output_tensor) model.compile(optimizer=\"sgd\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]) # Train model if lazy: model.fit(x=train,", "classification_report from dataset import FLIRDataset def grid_search(train_labels: str, test_labels: str, output:str, res:tuple=(120, 160),", "training batch_size: int Batch size in case of lazy loading epochs: int Training", "= FLIRDataset(train_labels, res=res, batch_size=batch_size) test = FLIRDataset(test_labels, res=res, batch_size=batch_size) # In eager loading", "K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x) x = K.layers.Flatten()(x) x = K.layers.Dense(num_classes, activation=\"softmax\")(x) return x print(\"\\n=>", "= K.layers.Flatten()(x) x = K.layers.Dense(num_classes, activation=\"softmax\")(x) return x print(\"\\n=> Training 
model.\") input_tensor =", "\"flir_pretrained_weights.h5\")) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Train model on FLIR dataset.\") parser.add_argument(\"train\",", "res=res, batch_size=batch_size) # In eager loading mode, train on everything. if not lazy:", "to output directory res: tuple Input resolution of network lazy: bool Whether to", "1)) output_tensor = net(input_tensor, num_classes=train.num_classes()) model = K.Model(input_tensor, output_tensor) model.compile(optimizer=\"sgd\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]) #", "directory res: tuple Input resolution of network lazy: bool Whether to load data", "network lazy: bool Whether to load data lazily in batches during training batch_size:", "parser = argparse.ArgumentParser(description=\"Train model on FLIR dataset.\") parser.add_argument(\"train\", help=\"Directory containing training labels\") parser.add_argument(\"test\",", "epochs=epochs, batch_size=batch_size, verbose=2) # Save weights model.save_weights(os.path.join(output, \"flir_pretrained_weights.h5\")) if __name__ == \"__main__\": parser", "search over all known models. Params ------ train_labels: str Path to training labels", "labels test_labels: str Path to testing labels output: str Path to output directory", "lazy: model.fit(x=train, epochs=epochs, validation_data=train, verbose=2) else: model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size, verbose=2) # Save", "as tf import tensorflow.keras as K from sklearn.metrics import classification_report from dataset import", "all known models. Params ------ train_labels: str Path to training labels test_labels: str", "epochs: int Training epochs \"\"\" # Data print(\"=> Loading data.\") train = FLIRDataset(train_labels,", "labels\") parser.add_argument(\"out\", help=\"Output directory for results\") parser.add_argument(\"epochs\", help=\"Number of epochs\") parser.add_argument(\"-l\", \"--lazy\", dest=\"lazy\",", "data lazily\", action=\"store_true\") args = vars(parser.parse_args()) grid_search(args[\"train\"], args[\"test\"], args[\"out\"], res=(120, 160), lazy=bool(args[\"lazy\"]), epochs=int(args[\"epochs\"]))", "y_train = train.get_all() X_test, y_test = test.get_all() X_train = np.concatenate([X_train, X_test], axis=0) y_train", "help=\"Number of epochs\") parser.add_argument(\"-l\", \"--lazy\", dest=\"lazy\", help=\"Load data lazily\", action=\"store_true\") args = vars(parser.parse_args())", "output directory res: tuple Input resolution of network lazy: bool Whether to load", "import tensorflow.keras as K from sklearn.metrics import classification_report from dataset import FLIRDataset def", "help=\"Output directory for results\") parser.add_argument(\"epochs\", help=\"Number of epochs\") parser.add_argument(\"-l\", \"--lazy\", dest=\"lazy\", help=\"Load data", "= net(input_tensor, num_classes=train.num_classes()) model = K.Model(input_tensor, output_tensor) model.compile(optimizer=\"sgd\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]) # Train model", "int Batch size in case of lazy loading epochs: int Training epochs \"\"\"", "160), lazy:bool=True, batch_size:int=16, epochs:int=20): \"\"\" Runs a grid search over all known models.", "np import tensorflow as tf import tensorflow.keras as K from sklearn.metrics import classification_report", "bool Whether to load data lazily in batches during training batch_size: int Batch", "FLIRDataset(test_labels, res=res, batch_size=batch_size) # In eager loading mode, train on everything. 
if not", "y_train = np.concatenate([y_train, y_test], axis=0) def net(x, num_classes=1): x = K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x)", "mode, train on everything. if not lazy: X_train, y_train = train.get_all() X_test, y_test", "model.\") input_tensor = K.layers.Input((160, 120, 1)) output_tensor = net(input_tensor, num_classes=train.num_classes()) model = K.Model(input_tensor,", "training labels test_labels: str Path to testing labels output: str Path to output", "testing labels output: str Path to output directory res: tuple Input resolution of", "Path to training labels test_labels: str Path to testing labels output: str Path", "loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]) # Train model if lazy: model.fit(x=train, epochs=epochs, validation_data=train, verbose=2) else: model.fit(x=X_train,", "# Save weights model.save_weights(os.path.join(output, \"flir_pretrained_weights.h5\")) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Train model", "Save weights model.save_weights(os.path.join(output, \"flir_pretrained_weights.h5\")) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Train model on", "== \"__main__\": parser = argparse.ArgumentParser(description=\"Train model on FLIR dataset.\") parser.add_argument(\"train\", help=\"Directory containing training", "print(\"=> Loading data.\") train = FLIRDataset(train_labels, res=res, batch_size=batch_size) test = FLIRDataset(test_labels, res=res, batch_size=batch_size)", "X_test, y_test = test.get_all() X_train = np.concatenate([X_train, X_test], axis=0) y_train = np.concatenate([y_train, y_test],", "parser.add_argument(\"-l\", \"--lazy\", dest=\"lazy\", help=\"Load data lazily\", action=\"store_true\") args = vars(parser.parse_args()) grid_search(args[\"train\"], args[\"test\"], args[\"out\"],", "res:tuple=(120, 160), lazy:bool=True, batch_size:int=16, epochs:int=20): \"\"\" Runs a grid search over all known", "a grid search over all known models. Params ------ train_labels: str Path to", "# In eager loading mode, train on everything. if not lazy: X_train, y_train", "x print(\"\\n=> Training model.\") input_tensor = K.layers.Input((160, 120, 1)) output_tensor = net(input_tensor, num_classes=train.num_classes())", "output_tensor) model.compile(optimizer=\"sgd\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]) # Train model if lazy: model.fit(x=train, epochs=epochs, validation_data=train, verbose=2)", "train.get_all() X_test, y_test = test.get_all() X_train = np.concatenate([X_train, X_test], axis=0) y_train = np.concatenate([y_train,", "over all known models. 
Params ------ train_labels: str Path to training labels test_labels:", "if lazy: model.fit(x=train, epochs=epochs, validation_data=train, verbose=2) else: model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size, verbose=2) #", "grid_search(train_labels: str, test_labels: str, output:str, res:tuple=(120, 160), lazy:bool=True, batch_size:int=16, epochs:int=20): \"\"\" Runs a", "X_train = np.concatenate([X_train, X_test], axis=0) y_train = np.concatenate([y_train, y_test], axis=0) def net(x, num_classes=1):", "model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size, verbose=2) # Save weights model.save_weights(os.path.join(output, \"flir_pretrained_weights.h5\")) if __name__ ==", "Train model if lazy: model.fit(x=train, epochs=epochs, validation_data=train, verbose=2) else: model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size,", "X_train, y_train = train.get_all() X_test, y_test = test.get_all() X_train = np.concatenate([X_train, X_test], axis=0)", "FLIRDataset(train_labels, res=res, batch_size=batch_size) test = FLIRDataset(test_labels, res=res, batch_size=batch_size) # In eager loading mode,", "else: model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size, verbose=2) # Save weights model.save_weights(os.path.join(output, \"flir_pretrained_weights.h5\")) if __name__", "= train.get_all() X_test, y_test = test.get_all() X_train = np.concatenate([X_train, X_test], axis=0) y_train =", "str Path to training labels test_labels: str Path to testing labels output: str", "load data lazily in batches during training batch_size: int Batch size in case", "dataset.\") parser.add_argument(\"train\", help=\"Directory containing training labels\") parser.add_argument(\"test\", help=\"Directory containing testing labels\") parser.add_argument(\"out\", help=\"Output", "FLIR dataset.\") parser.add_argument(\"train\", help=\"Directory containing training labels\") parser.add_argument(\"test\", help=\"Directory containing testing labels\") parser.add_argument(\"out\",", "results\") parser.add_argument(\"epochs\", help=\"Number of epochs\") parser.add_argument(\"-l\", \"--lazy\", dest=\"lazy\", help=\"Load data lazily\", action=\"store_true\") args", "res=res, batch_size=batch_size) test = FLIRDataset(test_labels, res=res, batch_size=batch_size) # In eager loading mode, train", "dest=\"lazy\", help=\"Load data lazily\", action=\"store_true\") args = vars(parser.parse_args()) grid_search(args[\"train\"], args[\"test\"], args[\"out\"], res=(120, 160),", "on FLIR dataset.\") parser.add_argument(\"train\", help=\"Directory containing training labels\") parser.add_argument(\"test\", help=\"Directory containing testing labels\")", "model if lazy: model.fit(x=train, epochs=epochs, validation_data=train, verbose=2) else: model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size, verbose=2)", "train_labels: str Path to training labels test_labels: str Path to testing labels output:", "epochs\") parser.add_argument(\"-l\", \"--lazy\", dest=\"lazy\", help=\"Load data lazily\", action=\"store_true\") args = vars(parser.parse_args()) grid_search(args[\"train\"], args[\"test\"],", "import os import argparse import numpy as np import tensorflow as tf import", "def net(x, num_classes=1): x = K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x) x = K.layers.Flatten()(x) x =", "as np import tensorflow as tf import tensorflow.keras as K from sklearn.metrics import", "= test.get_all() X_train = np.concatenate([X_train, 
X_test], axis=0) y_train = np.concatenate([y_train, y_test], axis=0) def", "of lazy loading epochs: int Training epochs \"\"\" # Data print(\"=> Loading data.\")", "to load data lazily in batches during training batch_size: int Batch size in", "str, test_labels: str, output:str, res:tuple=(120, 160), lazy:bool=True, batch_size:int=16, epochs:int=20): \"\"\" Runs a grid", "during training batch_size: int Batch size in case of lazy loading epochs: int", "to testing labels output: str Path to output directory res: tuple Input resolution", "directory for results\") parser.add_argument(\"epochs\", help=\"Number of epochs\") parser.add_argument(\"-l\", \"--lazy\", dest=\"lazy\", help=\"Load data lazily\",", "as K from sklearn.metrics import classification_report from dataset import FLIRDataset def grid_search(train_labels: str,", "def grid_search(train_labels: str, test_labels: str, output:str, res:tuple=(120, 160), lazy:bool=True, batch_size:int=16, epochs:int=20): \"\"\" Runs", "return x print(\"\\n=> Training model.\") input_tensor = K.layers.Input((160, 120, 1)) output_tensor = net(input_tensor,", "verbose=2) else: model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size, verbose=2) # Save weights model.save_weights(os.path.join(output, \"flir_pretrained_weights.h5\")) if", "tuple Input resolution of network lazy: bool Whether to load data lazily in", "import numpy as np import tensorflow as tf import tensorflow.keras as K from", "to training labels test_labels: str Path to testing labels output: str Path to", "models. Params ------ train_labels: str Path to training labels test_labels: str Path to", "dataset import FLIRDataset def grid_search(train_labels: str, test_labels: str, output:str, res:tuple=(120, 160), lazy:bool=True, batch_size:int=16,", "of epochs\") parser.add_argument(\"-l\", \"--lazy\", dest=\"lazy\", help=\"Load data lazily\", action=\"store_true\") args = vars(parser.parse_args()) grid_search(args[\"train\"],", "os import argparse import numpy as np import tensorflow as tf import tensorflow.keras", "str Path to output directory res: tuple Input resolution of network lazy: bool", "input_tensor = K.layers.Input((160, 120, 1)) output_tensor = net(input_tensor, num_classes=train.num_classes()) model = K.Model(input_tensor, output_tensor)", "train = FLIRDataset(train_labels, res=res, batch_size=batch_size) test = FLIRDataset(test_labels, res=res, batch_size=batch_size) # In eager" ]
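# The training script above assumes a dataset.FLIRDataset class that is not part
# of this dump: it must behave like a keras Sequence for lazy fitting and expose
# num_classes() and get_all() for eager loading. A hypothetical skeleton of that
# assumed interface (loading logic omitted, shapes are placeholders):
import numpy as np
import tensorflow.keras as K


class FLIRDataset(K.utils.Sequence):
    def __init__(self, labels_path, res=(120, 160), batch_size=16):
        self.labels_path = labels_path
        self.res = res
        self.batch_size = batch_size
        self.samples = []  # (image_path, class_id) pairs parsed from labels_path

    def num_classes(self):
        return len({c for _, c in self.samples}) or 1

    def __len__(self):
        return max(1, len(self.samples) // self.batch_size)

    def __getitem__(self, idx):
        # One (X, y) batch: grayscale images (batch, H, W, 1) and one-hot labels.
        batch = self.samples[idx * self.batch_size:(idx + 1) * self.batch_size]
        X = np.zeros((len(batch), self.res[0], self.res[1], 1), dtype="float32")
        y = np.zeros((len(batch), self.num_classes()), dtype="float32")
        return X, y

    def get_all(self):
        # Eager mode: concatenate every batch into single arrays.
        batches = [self[i] for i in range(len(self))]
        X = np.concatenate([b[0] for b in batches], axis=0)
        y = np.concatenate([b[1] for b in batches], axis=0)
        return X, y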
[ "Entrez.email = \"<EMAIL>\" # The maximum number of search results to be displayed", "= input(\"enter your search query: \") filter_option = input(\"would you like to use", "the email must be the user's individual/personal email (NOT an institutional email or", "maximum number of search results to be displayed could be the following default", "< 100 results_number = 5 query = input(\"enter your search query: \") filter_option", "email (NOT an institutional email or a default email # as this could", "allowable frequency of requests per user, of 3 per second) Entrez.email = \"<EMAIL>\"", "(NOT an institutional email or a default email # as this could lead", "requests per user, of 3 per second) Entrez.email = \"<EMAIL>\" # The maximum", "of 3 per second) Entrez.email = \"<EMAIL>\" # The maximum number of search", "could be the following default value or an input value < 100 results_number", "following default value or an input value < 100 results_number = 5 query", "or a default email # as this could lead to exceeding the maximum", "input(\"enter your search query: \") filter_option = input(\"would you like to use advanced", "could lead to exceeding the maximum allowable frequency of requests per user, of", "the following default value or an input value < 100 results_number = 5", "an input value < 100 results_number = 5 query = input(\"enter your search", "inputs' values :return: query \"\"\" # the email must be the user's individual/personal", "input(\"would you like to use advanced search filter? (yes/no): \") if filter_option ==", ":return: query \"\"\" # the email must be the user's individual/personal email (NOT", "inputnow(): \"\"\" Reads the inputs' values :return: query \"\"\" # the email must", "\"\"\" Reads the inputs' values :return: query \"\"\" # the email must be", "to be displayed could be the following default value or an input value", "to read the query and other inputs \"\"\" from Bio import Entrez from", "search filter? (yes/no): \") if filter_option == \"yes\": query = filter_selector(query) return query,", "Bio import Entrez from filter import filter_selector def inputnow(): \"\"\" Reads the inputs'", "query: \") filter_option = input(\"would you like to use advanced search filter? (yes/no):", "filter import filter_selector def inputnow(): \"\"\" Reads the inputs' values :return: query \"\"\"", "email must be the user's individual/personal email (NOT an institutional email or a", "or an input value < 100 results_number = 5 query = input(\"enter your", "frequency of requests per user, of 3 per second) Entrez.email = \"<EMAIL>\" #", "use advanced search filter? 
(yes/no): \") if filter_option == \"yes\": query = filter_selector(query)", "query = input(\"enter your search query: \") filter_option = input(\"would you like to", "a default email # as this could lead to exceeding the maximum allowable", "read the query and other inputs \"\"\" from Bio import Entrez from filter", "from filter import filter_selector def inputnow(): \"\"\" Reads the inputs' values :return: query", "query and other inputs \"\"\" from Bio import Entrez from filter import filter_selector", "email # as this could lead to exceeding the maximum allowable frequency of", "maximum allowable frequency of requests per user, of 3 per second) Entrez.email =", "and other inputs \"\"\" from Bio import Entrez from filter import filter_selector def", "results_number = 5 query = input(\"enter your search query: \") filter_option = input(\"would", "3 per second) Entrez.email = \"<EMAIL>\" # The maximum number of search results", "\"\"\" Module to read the query and other inputs \"\"\" from Bio import", "\"\"\" from Bio import Entrez from filter import filter_selector def inputnow(): \"\"\" Reads", "= input(\"would you like to use advanced search filter? (yes/no): \") if filter_option", "Module to read the query and other inputs \"\"\" from Bio import Entrez", "\"<EMAIL>\" # The maximum number of search results to be displayed could be", "import filter_selector def inputnow(): \"\"\" Reads the inputs' values :return: query \"\"\" #", "100 results_number = 5 query = input(\"enter your search query: \") filter_option =", "this could lead to exceeding the maximum allowable frequency of requests per user,", "filter_option = input(\"would you like to use advanced search filter? (yes/no): \") if", "other inputs \"\"\" from Bio import Entrez from filter import filter_selector def inputnow():", "to exceeding the maximum allowable frequency of requests per user, of 3 per", "be the user's individual/personal email (NOT an institutional email or a default email", "the maximum allowable frequency of requests per user, of 3 per second) Entrez.email", "like to use advanced search filter? (yes/no): \") if filter_option == \"yes\": query", "= \"<EMAIL>\" # The maximum number of search results to be displayed could", "# as this could lead to exceeding the maximum allowable frequency of requests", "an institutional email or a default email # as this could lead to", "as this could lead to exceeding the maximum allowable frequency of requests per", "import Entrez from filter import filter_selector def inputnow(): \"\"\" Reads the inputs' values", "advanced search filter? 
(yes/no): \") if filter_option == \"yes\": query = filter_selector(query) return", "# the email must be the user's individual/personal email (NOT an institutional email", "value or an input value < 100 results_number = 5 query = input(\"enter", "The maximum number of search results to be displayed could be the following", "input value < 100 results_number = 5 query = input(\"enter your search query:", "the inputs' values :return: query \"\"\" # the email must be the user's", "default value or an input value < 100 results_number = 5 query =", "results to be displayed could be the following default value or an input", "per user, of 3 per second) Entrez.email = \"<EMAIL>\" # The maximum number", "second) Entrez.email = \"<EMAIL>\" # The maximum number of search results to be", "default email # as this could lead to exceeding the maximum allowable frequency", "def inputnow(): \"\"\" Reads the inputs' values :return: query \"\"\" # the email", "of requests per user, of 3 per second) Entrez.email = \"<EMAIL>\" # The", "\") filter_option = input(\"would you like to use advanced search filter? (yes/no): \")", "to use advanced search filter? (yes/no): \") if filter_option == \"yes\": query =", "per second) Entrez.email = \"<EMAIL>\" # The maximum number of search results to", "your search query: \") filter_option = input(\"would you like to use advanced search", "search query: \") filter_option = input(\"would you like to use advanced search filter?", "5 query = input(\"enter your search query: \") filter_option = input(\"would you like", "the query and other inputs \"\"\" from Bio import Entrez from filter import", "displayed could be the following default value or an input value < 100", "lead to exceeding the maximum allowable frequency of requests per user, of 3", "= 5 query = input(\"enter your search query: \") filter_option = input(\"would you", "values :return: query \"\"\" # the email must be the user's individual/personal email", "search results to be displayed could be the following default value or an", "\"\"\" # the email must be the user's individual/personal email (NOT an institutional", "number of search results to be displayed could be the following default value", "individual/personal email (NOT an institutional email or a default email # as this", "Reads the inputs' values :return: query \"\"\" # the email must be the", "institutional email or a default email # as this could lead to exceeding", "you like to use advanced search filter? 
(yes/no): \") if filter_option == \"yes\":", "filter_selector def inputnow(): \"\"\" Reads the inputs' values :return: query \"\"\" # the", "exceeding the maximum allowable frequency of requests per user, of 3 per second)", "user's individual/personal email (NOT an institutional email or a default email # as", "email or a default email # as this could lead to exceeding the", "must be the user's individual/personal email (NOT an institutional email or a default", "of search results to be displayed could be the following default value or", "Entrez from filter import filter_selector def inputnow(): \"\"\" Reads the inputs' values :return:", "value < 100 results_number = 5 query = input(\"enter your search query: \")", "from Bio import Entrez from filter import filter_selector def inputnow(): \"\"\" Reads the", "user, of 3 per second) Entrez.email = \"<EMAIL>\" # The maximum number of", "# The maximum number of search results to be displayed could be the", "be displayed could be the following default value or an input value <", "the user's individual/personal email (NOT an institutional email or a default email #", "inputs \"\"\" from Bio import Entrez from filter import filter_selector def inputnow(): \"\"\"", "filter? (yes/no): \") if filter_option == \"yes\": query = filter_selector(query) return query, results_number", "be the following default value or an input value < 100 results_number =", "query \"\"\" # the email must be the user's individual/personal email (NOT an" ]
[ "DATA_PATH = expanduser(\"~/jarbasHiveMind\") if not isdir(DATA_PATH): makedirs(DATA_PATH) CERTS_PATH = join(DATA_PATH, \"certs\") if not", "join(DATA_PATH, \"certs\") if not isdir(CERTS_PATH): makedirs(CERTS_PATH) DB_PATH = join(DATA_PATH, \"database\") if not isdir(DB_PATH):", "<gh_stars>10-100 from os import makedirs from os.path import isdir, join, expanduser DATA_PATH =", "not isdir(CERTS_PATH): makedirs(CERTS_PATH) DB_PATH = join(DATA_PATH, \"database\") if not isdir(DB_PATH): makedirs(DB_PATH) CLIENTS_DB =", "\"database\") if not isdir(DB_PATH): makedirs(DB_PATH) CLIENTS_DB = \"sqlite:///\" + join(DB_PATH, \"clients.db\") DEFAULT_PORT =", "[] MYCROFT_WEBSOCKET_CONFIG = { \"host\": \"0.0.0.0\", \"port\": 8181, \"route\": \"/core\", \"ssl\": False }", "isdir, join, expanduser DATA_PATH = expanduser(\"~/jarbasHiveMind\") if not isdir(DATA_PATH): makedirs(DATA_PATH) CERTS_PATH = join(DATA_PATH,", "5678 USE_SSL = True LOG_BLACKLIST = [] MYCROFT_WEBSOCKET_CONFIG = { \"host\": \"0.0.0.0\", \"port\":", "if not isdir(CERTS_PATH): makedirs(CERTS_PATH) DB_PATH = join(DATA_PATH, \"database\") if not isdir(DB_PATH): makedirs(DB_PATH) CLIENTS_DB", "join(DATA_PATH, \"database\") if not isdir(DB_PATH): makedirs(DB_PATH) CLIENTS_DB = \"sqlite:///\" + join(DB_PATH, \"clients.db\") DEFAULT_PORT", "DB_PATH = join(DATA_PATH, \"database\") if not isdir(DB_PATH): makedirs(DB_PATH) CLIENTS_DB = \"sqlite:///\" + join(DB_PATH,", "makedirs(DB_PATH) CLIENTS_DB = \"sqlite:///\" + join(DB_PATH, \"clients.db\") DEFAULT_PORT = 5678 USE_SSL = True", "= join(DATA_PATH, \"database\") if not isdir(DB_PATH): makedirs(DB_PATH) CLIENTS_DB = \"sqlite:///\" + join(DB_PATH, \"clients.db\")", "expanduser(\"~/jarbasHiveMind\") if not isdir(DATA_PATH): makedirs(DATA_PATH) CERTS_PATH = join(DATA_PATH, \"certs\") if not isdir(CERTS_PATH): makedirs(CERTS_PATH)", "= join(DATA_PATH, \"certs\") if not isdir(CERTS_PATH): makedirs(CERTS_PATH) DB_PATH = join(DATA_PATH, \"database\") if not", "LOG_BLACKLIST = [] MYCROFT_WEBSOCKET_CONFIG = { \"host\": \"0.0.0.0\", \"port\": 8181, \"route\": \"/core\", \"ssl\":", "True LOG_BLACKLIST = [] MYCROFT_WEBSOCKET_CONFIG = { \"host\": \"0.0.0.0\", \"port\": 8181, \"route\": \"/core\",", "from os import makedirs from os.path import isdir, join, expanduser DATA_PATH = expanduser(\"~/jarbasHiveMind\")", "= expanduser(\"~/jarbasHiveMind\") if not isdir(DATA_PATH): makedirs(DATA_PATH) CERTS_PATH = join(DATA_PATH, \"certs\") if not isdir(CERTS_PATH):", "USE_SSL = True LOG_BLACKLIST = [] MYCROFT_WEBSOCKET_CONFIG = { \"host\": \"0.0.0.0\", \"port\": 8181,", "not isdir(DATA_PATH): makedirs(DATA_PATH) CERTS_PATH = join(DATA_PATH, \"certs\") if not isdir(CERTS_PATH): makedirs(CERTS_PATH) DB_PATH =", "CERTS_PATH = join(DATA_PATH, \"certs\") if not isdir(CERTS_PATH): makedirs(CERTS_PATH) DB_PATH = join(DATA_PATH, \"database\") if", "isdir(CERTS_PATH): makedirs(CERTS_PATH) DB_PATH = join(DATA_PATH, \"database\") if not isdir(DB_PATH): makedirs(DB_PATH) CLIENTS_DB = \"sqlite:///\"", "isdir(DB_PATH): makedirs(DB_PATH) CLIENTS_DB = \"sqlite:///\" + join(DB_PATH, \"clients.db\") DEFAULT_PORT = 5678 USE_SSL =", "isdir(DATA_PATH): makedirs(DATA_PATH) CERTS_PATH = join(DATA_PATH, \"certs\") if not isdir(CERTS_PATH): makedirs(CERTS_PATH) DB_PATH = join(DATA_PATH,", "+ join(DB_PATH, \"clients.db\") DEFAULT_PORT = 5678 USE_SSL = True LOG_BLACKLIST = [] MYCROFT_WEBSOCKET_CONFIG", "if not isdir(DATA_PATH): makedirs(DATA_PATH) CERTS_PATH = join(DATA_PATH, \"certs\") if not isdir(CERTS_PATH): makedirs(CERTS_PATH) DB_PATH", 
"import makedirs from os.path import isdir, join, expanduser DATA_PATH = expanduser(\"~/jarbasHiveMind\") if not", "\"certs\") if not isdir(CERTS_PATH): makedirs(CERTS_PATH) DB_PATH = join(DATA_PATH, \"database\") if not isdir(DB_PATH): makedirs(DB_PATH)", "\"sqlite:///\" + join(DB_PATH, \"clients.db\") DEFAULT_PORT = 5678 USE_SSL = True LOG_BLACKLIST = []", "DEFAULT_PORT = 5678 USE_SSL = True LOG_BLACKLIST = [] MYCROFT_WEBSOCKET_CONFIG = { \"host\":", "expanduser DATA_PATH = expanduser(\"~/jarbasHiveMind\") if not isdir(DATA_PATH): makedirs(DATA_PATH) CERTS_PATH = join(DATA_PATH, \"certs\") if", "os import makedirs from os.path import isdir, join, expanduser DATA_PATH = expanduser(\"~/jarbasHiveMind\") if", "join(DB_PATH, \"clients.db\") DEFAULT_PORT = 5678 USE_SSL = True LOG_BLACKLIST = [] MYCROFT_WEBSOCKET_CONFIG =", "= [] MYCROFT_WEBSOCKET_CONFIG = { \"host\": \"0.0.0.0\", \"port\": 8181, \"route\": \"/core\", \"ssl\": False", "CLIENTS_DB = \"sqlite:///\" + join(DB_PATH, \"clients.db\") DEFAULT_PORT = 5678 USE_SSL = True LOG_BLACKLIST", "= 5678 USE_SSL = True LOG_BLACKLIST = [] MYCROFT_WEBSOCKET_CONFIG = { \"host\": \"0.0.0.0\",", "import isdir, join, expanduser DATA_PATH = expanduser(\"~/jarbasHiveMind\") if not isdir(DATA_PATH): makedirs(DATA_PATH) CERTS_PATH =", "makedirs from os.path import isdir, join, expanduser DATA_PATH = expanduser(\"~/jarbasHiveMind\") if not isdir(DATA_PATH):", "\"clients.db\") DEFAULT_PORT = 5678 USE_SSL = True LOG_BLACKLIST = [] MYCROFT_WEBSOCKET_CONFIG = {", "join, expanduser DATA_PATH = expanduser(\"~/jarbasHiveMind\") if not isdir(DATA_PATH): makedirs(DATA_PATH) CERTS_PATH = join(DATA_PATH, \"certs\")", "= \"sqlite:///\" + join(DB_PATH, \"clients.db\") DEFAULT_PORT = 5678 USE_SSL = True LOG_BLACKLIST =", "not isdir(DB_PATH): makedirs(DB_PATH) CLIENTS_DB = \"sqlite:///\" + join(DB_PATH, \"clients.db\") DEFAULT_PORT = 5678 USE_SSL", "if not isdir(DB_PATH): makedirs(DB_PATH) CLIENTS_DB = \"sqlite:///\" + join(DB_PATH, \"clients.db\") DEFAULT_PORT = 5678", "from os.path import isdir, join, expanduser DATA_PATH = expanduser(\"~/jarbasHiveMind\") if not isdir(DATA_PATH): makedirs(DATA_PATH)", "os.path import isdir, join, expanduser DATA_PATH = expanduser(\"~/jarbasHiveMind\") if not isdir(DATA_PATH): makedirs(DATA_PATH) CERTS_PATH", "makedirs(CERTS_PATH) DB_PATH = join(DATA_PATH, \"database\") if not isdir(DB_PATH): makedirs(DB_PATH) CLIENTS_DB = \"sqlite:///\" +", "makedirs(DATA_PATH) CERTS_PATH = join(DATA_PATH, \"certs\") if not isdir(CERTS_PATH): makedirs(CERTS_PATH) DB_PATH = join(DATA_PATH, \"database\")", "= True LOG_BLACKLIST = [] MYCROFT_WEBSOCKET_CONFIG = { \"host\": \"0.0.0.0\", \"port\": 8181, \"route\":" ]
[ "line2 = l2[i].split() val = (int(line1[3]) + int(line2[3])) / 2 res.writelines(line1[0] + '", "super(Item, self).__init__() l1 = open(file1_name, 'r').readlines() l2 = open(file2_name, 'r').readlines() res = open('res','w')", "open(file2_name, 'r').readlines() res = open('res','w') def run(): for i in range(len(l1)): line1 =", "= 'a' file2_name = 'titanic' class Item(object): \"\"\"tring for Item\"\"\" def __init__(self): super(Item,", "range(len(l1)): line1 = l1[i].split() line2 = l2[i].split() val = (int(line1[3]) + int(line2[3])) /", "python file1_name = 'a' file2_name = 'titanic' class Item(object): \"\"\"tring for Item\"\"\" def", "for i in range(len(l1)): line1 = l1[i].split() line2 = l2[i].split() val = (int(line1[3])", "def run(): for i in range(len(l1)): line1 = l1[i].split() line2 = l2[i].split() val", "+ line1[1] + ' ' + line1[2] + ' ' + str(val) +", "l2 = open(file2_name, 'r').readlines() res = open('res','w') def run(): for i in range(len(l1)):", "= open(file1_name, 'r').readlines() l2 = open(file2_name, 'r').readlines() res = open('res','w') def run(): for", "+ ' ' + line1[1] + ' ' + line1[2] + ' '", "file1_name = 'a' file2_name = 'titanic' class Item(object): \"\"\"tring for Item\"\"\" def __init__(self):", "i in range(len(l1)): line1 = l1[i].split() line2 = l2[i].split() val = (int(line1[3]) +", "'a' file2_name = 'titanic' class Item(object): \"\"\"tring for Item\"\"\" def __init__(self): super(Item, self).__init__()", "class Item(object): \"\"\"tring for Item\"\"\" def __init__(self): super(Item, self).__init__() l1 = open(file1_name, 'r').readlines()", "= l1[i].split() line2 = l2[i].split() val = (int(line1[3]) + int(line2[3])) / 2 res.writelines(line1[0]", "' + line1[2] + ' ' + str(val) + ' \\n') run() res.close()", "' + line1[1] + ' ' + line1[2] + ' ' + str(val)", "#!/usr/bin/env python file1_name = 'a' file2_name = 'titanic' class Item(object): \"\"\"tring for Item\"\"\"", "file2_name = 'titanic' class Item(object): \"\"\"tring for Item\"\"\" def __init__(self): super(Item, self).__init__() l1", "2 res.writelines(line1[0] + ' ' + line1[1] + ' ' + line1[2] +", "\"\"\"tring for Item\"\"\" def __init__(self): super(Item, self).__init__() l1 = open(file1_name, 'r').readlines() l2 =", "res = open('res','w') def run(): for i in range(len(l1)): line1 = l1[i].split() line2", "int(line2[3])) / 2 res.writelines(line1[0] + ' ' + line1[1] + ' ' +", "l1[i].split() line2 = l2[i].split() val = (int(line1[3]) + int(line2[3])) / 2 res.writelines(line1[0] +", "= open('res','w') def run(): for i in range(len(l1)): line1 = l1[i].split() line2 =", "l2[i].split() val = (int(line1[3]) + int(line2[3])) / 2 res.writelines(line1[0] + ' ' +", "'r').readlines() l2 = open(file2_name, 'r').readlines() res = open('res','w') def run(): for i in", "' ' + line1[1] + ' ' + line1[2] + ' ' +", "__init__(self): super(Item, self).__init__() l1 = open(file1_name, 'r').readlines() l2 = open(file2_name, 'r').readlines() res =", "'r').readlines() res = open('res','w') def run(): for i in range(len(l1)): line1 = l1[i].split()", "/ 2 res.writelines(line1[0] + ' ' + line1[1] + ' ' + line1[2]", "(int(line1[3]) + int(line2[3])) / 2 res.writelines(line1[0] + ' ' + line1[1] + '", "= 'titanic' class Item(object): \"\"\"tring for Item\"\"\" def __init__(self): super(Item, self).__init__() l1 =", "open(file1_name, 'r').readlines() l2 = open(file2_name, 'r').readlines() res = open('res','w') def run(): for i", "= l2[i].split() val = (int(line1[3]) + int(line2[3])) / 2 res.writelines(line1[0] + ' '", "= open(file2_name, 
'r').readlines() res = open('res','w') def run(): for i in range(len(l1)): line1", "'titanic' class Item(object): \"\"\"tring for Item\"\"\" def __init__(self): super(Item, self).__init__() l1 = open(file1_name,", "' ' + line1[2] + ' ' + str(val) + ' \\n') run()", "open('res','w') def run(): for i in range(len(l1)): line1 = l1[i].split() line2 = l2[i].split()", "+ ' ' + line1[2] + ' ' + str(val) + ' \\n')", "run(): for i in range(len(l1)): line1 = l1[i].split() line2 = l2[i].split() val =", "Item\"\"\" def __init__(self): super(Item, self).__init__() l1 = open(file1_name, 'r').readlines() l2 = open(file2_name, 'r').readlines()", "Item(object): \"\"\"tring for Item\"\"\" def __init__(self): super(Item, self).__init__() l1 = open(file1_name, 'r').readlines() l2", "line1[1] + ' ' + line1[2] + ' ' + str(val) + '", "res.writelines(line1[0] + ' ' + line1[1] + ' ' + line1[2] + '", "+ int(line2[3])) / 2 res.writelines(line1[0] + ' ' + line1[1] + ' '", "= (int(line1[3]) + int(line2[3])) / 2 res.writelines(line1[0] + ' ' + line1[1] +", "for Item\"\"\" def __init__(self): super(Item, self).__init__() l1 = open(file1_name, 'r').readlines() l2 = open(file2_name,", "def __init__(self): super(Item, self).__init__() l1 = open(file1_name, 'r').readlines() l2 = open(file2_name, 'r').readlines() res", "in range(len(l1)): line1 = l1[i].split() line2 = l2[i].split() val = (int(line1[3]) + int(line2[3]))", "val = (int(line1[3]) + int(line2[3])) / 2 res.writelines(line1[0] + ' ' + line1[1]", "l1 = open(file1_name, 'r').readlines() l2 = open(file2_name, 'r').readlines() res = open('res','w') def run():", "self).__init__() l1 = open(file1_name, 'r').readlines() l2 = open(file2_name, 'r').readlines() res = open('res','w') def", "line1 = l1[i].split() line2 = l2[i].split() val = (int(line1[3]) + int(line2[3])) / 2" ]
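The script keeps `l1`, `l2`, and `res` open at module level and assumes both inputs are line-aligned with a numeric fourth column. Under the same assumptions, a sketch of the merge using context managers, so the handles are closed even if a line is malformed (the `merge_average` name is illustrative, not the script's):

def merge_average(file1_name: str, file2_name: str, out_name: str = 'res') -> None:
    # Same merge as run() above: keep the first three columns of file1 and
    # write the average of the two fourth columns.
    with open(file1_name) as f1, open(file2_name) as f2, open(out_name, 'w') as out:
        for raw1, raw2 in zip(f1, f2):
            line1, line2 = raw1.split(), raw2.split()
            val = (int(line1[3]) + int(line2[3])) / 2
            out.write(' '.join(line1[:3]) + ' ' + str(val) + ' \n')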
[ ":param is_folder:是否是文件夹 :param name:文件/文件夹名称 :param parent_folder:父文件夹对象 :param child_nodes:文件夹内容 :param data:文件数据 :return:文件/文件夹对象。若同路径存在重名返回-1,磁盘分配错误返回-2 \"\"\" if", "in child_node_names: print('新名称在同路径下冲突') return -1 else: folder_obj.folder_name = new_name return 1 def renameFile(old_name:", "return -2 parent_folder.child_nodes.append(new_file) return new_file def getPath(is_folder: bool, target_folder: Folder = None, target_file:", "target_file.data def delFile(file_name: str, file_table: list, root: Folder, Disk: list): \"\"\" 彻底删除文件,包括磁盘和文件表的记录 :param", "path.replace(\" \", \"\") path_node_list = path.split('/') if path_node_list[0] == \"\": path_node_list = path_node_list[1:]", "写数据 else: clearFileInDisk(target, Disk) target.data = IR[\"content\"] target.size = math.ceil(len(IR[\"content\"]) / 10) target.disk_position", "not state: state = True default_folder_1 = creatFileOrFolder(True, 'default_folder_1', root_node, data=None, Disk=disk, file_table=f_table)", "读数据 else: return target.data # 写文件 elif IR[\"operator\"] == \"writeFile\": # 权限不够 if", "-1 else: target.folder_name = IR[\"newName\"] return 1 elif path_node_list[i] in child_node_names: parent_node =", "Disk=Disk, file_table=file_table) else: # 不存在问题 if not path_node_list[i] in child_node_names: return 0 target", "state:状态标志 :return:状态标志,文件根节点,文件系统磁盘,文件表 \"\"\" disk = [-1 for _ in range(DiskSize)] # 磁盘,存储文件的id f_table", "file_table: 文件表 :param IR: 直接执行指令 :param path:文件字符串 :return:文件/文件夹对象。若查找错误,返回0 \"\"\" path = path.replace(\" \",", "/ 10) \"\"\" size是文件占据的磁盘空间,为了方便前端表示,单位为10字节(即磁盘一个块有10字节),一个英文字符一个字节,注意每次更改file内容时应当要更新该值 eg1. yes 有三个字符,占三个字节,除以10向上取整,因此该文件占据的磁盘空间为1 eg2. I am sure I am", ":return:返回1表示成功写入,返回0表示写入失败,返回-1表示文件没有写权限 \"\"\" target_file = findObjByName(file_name, root) if target_file is None or isinstance(target_file, Folder):", "initFileSystem(DiskSize: int = 1000, state: bool = False): \"\"\" 文件系统初始化 :param DiskSize:文件系统磁盘大小 :param", "findFileById(file_id: int, file_table: list): \"\"\" 通过文件id返回文件对象 该函数常用于磁盘索引文件,因为在本项目中磁盘仅需读取文件标识符(id)即可找到文件对象 :param file_table: 文件表 :param file_id:文件标识符 :return:文件对象。若返回-1表明没有找到对应标识符的文件", "Folder, Disk: list): \"\"\" 彻底删除文件,包括磁盘和文件表的记录 :param Disk: 文件系统磁盘 :param root: 文件系统根节点 :param file_table:", "\"\"\" if not parent_node.child_nodes: return None child_node_names = list(map(str, parent_node.child_nodes)) if name in", "return -1 else: file_obj.file_name = new_name return 1 def writeFile(file_name: str, content: str,", "isinstance(target_file, UserFile) if target_file.authority == FileAuthority.WriteOnly: print('文件权限不足') # TODO:异常处理 return -1 else: return", "for child in child_nodes: data.append(FileTree(child)) return {parent_node.__str__(): data} elif isinstance(parent_node, UserFile): return {parent_node.__str__():", "range(DiskSize)] # 磁盘,存储文件的id f_table = [] # 文件表,存储所有已经建立的文件 root_node = creatFileOrFolder(True, 'root', None,", "start_index = -1 space_counter = 0 for i in range(len(Disk)): # 主要思想是,找到磁盘中连续且符合文件大小的几个块,且从磁盘头部遍历查找,这样有利于减少外部碎片 if", "path = path.replace(\" \", \"\") path_node_list = path.split('/') if path_node_list[0] == \"\": path_node_list", "Disk: 文件系统磁盘 :param root: 文件系统根节点 :param file_name:文件名 :param content:新内容 :return:返回1表示成功写入,返回0表示写入失败,返回-1表示文件没有写权限 \"\"\" target_file =", "== FileAuthority.ReadOnly: return -1 # 写数据 else: clearFileInDisk(target, Disk) target.data = IR[\"content\"] target.size", "= list(map(str, parent_node.child_nodes)) if name in child_node_names: return parent_node.child_nodes[child_node_names.index(name)] else: for child_node in", 
"FileAuthority(Enum): Default = 0 ReadOnly = 1 WriteOnly = 2 class Folder: def", "child_node in parent_node.child_nodes: if isinstance(child_node, Folder): result = findObjByName(name, child_node) if result is", "每次都会更新子节点们 child_node_names = list(map(str, parent_node.child_nodes)) for i in range(1, len(path_node_list)): if i ==", "Folder): return -1 new_folder = Folder(name, parent_folder, child_nodes) if not name == 'root':", "else: target.folder_name = IR[\"newName\"] return 1 elif path_node_list[i] in child_node_names: parent_node = parent_node.child_nodes[child_node_names.index(path_node_list[i])]", "str, new_name: str, root: Folder): \"\"\" 重命名文件夹 :param root: 文件系统根节点 :param old_name:旧名称 :param", "0 assert isinstance(target_file, UserFile) assert isinstance(target_folder, Folder) target_file.parent_node.child_nodes.remove(target_file) target_file.parent_node = target_folder target_folder.child_nodes.append(target_file) return", "\"\"\" path = path.replace(\" \", \"\") path_node_list = path.split('/') if path_node_list[0] == \"\":", "+ '/' + path_now return path \"\"\" 路径的格式为: /root/aaa/w 以上路径表示root文件夹下的aaa文件夹的名为w的文件/文件夹 \"\"\" def pathToObj(path:", "= 1 Create = 2 Rename = 3 Delete = 4 Redirect =", "= list(map(str, parent_node.child_nodes)) else: return 0 def clearFileInDisk(target_file: UserFile, Disk: list): \"\"\" 在物理磁盘中删除文件信息", "\"createFile\": return creatFileOrFolder(False, path_node_list[i], parent_node, data=IR['content'], Disk=Disk, file_table=file_table) elif IR[\"operator\"] == \"createFolder\": return", "\"\"\" 利用递归获取文件/文件夹的路径 :param is_folder:欲获取路径的对象是否是文件夹 :param target_folder:目标文件夹 :param target_file:目标文件 :return:目标对象的路径 \"\"\" if is_folder and", "size是文件占据的磁盘空间,为了方便前端表示,单位为10字节(即磁盘一个块有10字节),一个英文字符一个字节,注意每次更改file内容时应当要更新该值 eg1. yes 有三个字符,占三个字节,除以10向上取整,因此该文件占据的磁盘空间为1 eg2. I am sure I am very handsome. 有29个字符(标点空格都算),因此该文件占据的磁盘空间为3", "folder_obj = findObjByName(old_name, root) if folder_obj is None: print('找不到该文件夹') # TODO:异常处理 return 0", "folder_obj.parent_node.child_nodes)) if new_name in child_node_names: print('新名称在同路径下冲突') return -1 else: folder_obj.folder_name = new_name return", "\"\"\" 创建文件或文件夹 :param Disk: 文件系统磁盘 :param file_table: 文件表 :param is_folder:是否是文件夹 :param name:文件/文件夹名称 :param", "path_node_list = path.split('/') if path_node_list[0] == \"\": path_node_list = path_node_list[1:] if len(path_node_list) <", "IR is None: return parent_node.child_nodes[child_node_names.index(path_node_list[i])] elif IR[\"operator\"] == \"createFile\": return creatFileOrFolder(False, path_node_list[i], parent_node,", "= Tool.uniqueNum() self.file_name = file_name self.parent_node = parent_folder self.data = data self.size =", "target.authority == FileAuthority.WriteOnly: return -1 # 读数据 else: return target.data # 写文件 elif", "isinstance(target, Folder): return 0 else: clearFileInDisk(target, Disk) file_table.remove(target) target.parent_node.child_nodes.remove(target) return 1 elif IR[\"operator\"]", "writeDiskToTXT(): # TODO:把结果输出到TXT? 
pass def creatFileOrFolder(is_folder: bool, name: str, parent_folder: Folder, file_table: list,", "target.authority == FileAuthority.ReadOnly: return -1 # 写数据 else: clearFileInDisk(target, Disk) target.data = IR[\"content\"]", "Folder): \"\"\" 重命名文件夹 :param root: 文件系统根节点 :param old_name:旧名称 :param new_name:新名称 :return:0表示找不到文件夹,-1表示新名字重名,1表示改名成功 \"\"\" folder_obj", "target_folder_name:欲重定向的目标文件夹 :return:返回0表示无法找到对应文件,返回1表明重定向成功 \"\"\" target_file = findObjByName(file_name, root) if target_file is None or isinstance(target_file,", "for child_node in parent_node.child_nodes: if isinstance(child_node, Folder): result = findObjByName(name, child_node) if result", "[] child_nodes = list(parent_node.child_nodes) for child in child_nodes: data.append(FileTree(child)) return {parent_node.__str__(): data} elif", "file_to_allocated:需要分配磁盘空间的文件 :return:返回文件所存储的磁盘位置(起始下标),若返回-1说明磁盘空间不足 \"\"\" start_index = -1 space_counter = 0 for i in range(len(Disk)):", "if not name == 'root': parent_folder.child_nodes.append(new_folder) return new_folder else: for node in parent_folder.child_nodes:", "Disk: 文件系统磁盘 :param file_table: 文件表 :param is_folder:是否是文件夹 :param name:文件/文件夹名称 :param parent_folder:父文件夹对象 :param child_nodes:文件夹内容", "Disk: list): \"\"\" 磁盘文件连续分配 :param Disk: 文件系统磁盘 :param file_to_allocated:需要分配磁盘空间的文件 :return:返回文件所存储的磁盘位置(起始下标),若返回-1说明磁盘空间不足 \"\"\" start_index =", ":param Disk: 文件系统磁盘 :param root: 文件系统根节点 :param file_table: 文件表 :param file_name:文件名 :return:返回0表示无法找到对应文件,返回1表明删除成功 \"\"\"", "不存在问题 if not path_node_list[i] in child_node_names: return 0 target = parent_node.child_nodes[child_node_names.index(path_node_list[i])] # 读文件", "list): \"\"\" 彻底删除文件,包括磁盘和文件表的记录 :param Disk: 文件系统磁盘 :param root: 文件系统根节点 :param file_table: 文件表 :param", "None, data=None, Disk=disk, file_table=f_table) if not state: state = True default_folder_1 = creatFileOrFolder(True,", "creatFileOrFolder(True, 'default_folder_3', root_node, data=None, Disk=disk, file_table=f_table) creatFileOrFolder(False, 'test', default_folder_1, data='This is a file", "path_node_list[i] in child_node_names: return 0 target = parent_node.child_nodes[child_node_names.index(path_node_list[i])] # 读文件 if IR[\"operator\"] ==", "Tool.uniqueNum() self.folder_name = folder_name self.parent_node = parent_folder self.child_nodes = child_nodes def __str__(self): return", "f return -1 def findObjByName(name: str, parent_node): \"\"\" 利用递归,查找除了root文件夹以外的文件系统对象 :param name:文件/文件夹名称 :param parent_node:该参数用于递归,调用时必须传入root文件系统节点", "i in range(target_file.disk_position, target_file.disk_position + target_file.size): Disk[i] = -1 def findFileById(file_id: int, file_table:", "str, new_name: str, root: Folder): \"\"\" 重命名文件 :param root: 文件系统根节点 :param old_name:旧名称 :param", "math from enum import Enum from kernal import Tool class FileOperation(Enum): Read =", "= target_folder.folder_name parent_node = target_folder.parent_node path = getPath(True, target_folder=parent_node) + '/' + path_now", "parent_folder, child_nodes: list): \"\"\" 文件夹数据结构,请注意,文件夹为逻辑结构,因此不会占用物理磁盘空间 :param folder_name:文件夹名 :param parent_folder:父节点。所有父节点一定是文件夹,注意,根节点(根文件夹)没有父节点,该属性为None :param child_nodes:子节点。可能有多个,且可能是文件夹,也可能是文件 \"\"\" self.id", "self.parent_node = parent_folder self.data = data self.size = math.ceil(len(data) / 10) \"\"\" size是文件占据的磁盘空间,为了方便前端表示,单位为10字节(即磁盘一个块有10字节),一个英文字符一个字节,注意每次更改file内容时应当要更新该值", "Disk: list, root: Folder): \"\"\" 通过路径找到文件/文件夹 :param root:文件系统根节点 :param Disk: 文件系统磁盘 :param file_table:", "-1: print('磁盘空间分配错误') # TODO:异常处理 return -2 
parent_folder.child_nodes.append(new_file) return new_file def getPath(is_folder: bool, target_folder:", "-1 else: return target_file.data def delFile(file_name: str, file_table: list, root: Folder, Disk: list):", "and isinstance(node, Folder): return -1 new_folder = Folder(name, parent_folder, child_nodes) if not name", "return self.file_name def contiguousAllocation(file_to_allocated: UserFile, Disk: list): \"\"\" 磁盘文件连续分配 :param Disk: 文件系统磁盘 :param", "is not None: return result return None def renameFolder(old_name: str, new_name: str, root:", "UserFile) if target_file.authority == FileAuthority.ReadOnly: print('文件权限不足') # TODO:异常处理 return -1 else: clearFileInDisk(target_file, Disk)", "None: print('找不到该文件夹') # TODO:异常处理 return 0 child_node_names = list(map(str, folder_obj.parent_node.child_nodes)) if new_name in", "not parent_node.child_nodes: return None child_node_names = list(map(str, parent_node.child_nodes)) if name in child_node_names: return", "class FileAuthority(Enum): Default = 0 ReadOnly = 1 WriteOnly = 2 class Folder:", "1 elif IR[\"operator\"] == \"renameFolder\": if IR[\"newName\"] in child_node_names: print('新名称在同路径下冲突') return -1 else:", "= findObjByName(name, child_node) if result is not None: return result return None def", "print('新名称在同路径下冲突') return -1 else: folder_obj.folder_name = new_name return 1 def renameFile(old_name: str, new_name:", "pathToObj(path: str, IR: dict, file_table: list, Disk: list, root: Folder): \"\"\" 通过路径找到文件/文件夹 :param", "root_node.child_nodes = [default_folder_1, default_folder_2, default_folder_3] return state, root_node, disk, f_table def FileTree(parent_node): #", "if is_folder and target_folder.folder_name == 'root': return '/root' if not is_folder: path_now =", "IR: 直接执行指令 :param path:文件字符串 :return:文件/文件夹对象。若查找错误,返回0 \"\"\" path = path.replace(\" \", \"\") path_node_list =", "content target_file.size = math.ceil(len(content) / 10) target_file.disk_position = contiguousAllocation(target_file, Disk) return 1 def", "有29个字符(标点空格都算),因此该文件占据的磁盘空间为3 \"\"\" self.disk_position = -1 # 文件在磁盘中的位置 self.authority = authority def __str__(self): return", "TODO:把结果输出到TXT? 
pass def creatFileOrFolder(is_folder: bool, name: str, parent_folder: Folder, file_table: list, Disk: list,", "clearFileInDisk(target_file: UserFile, Disk: list): \"\"\" 在物理磁盘中删除文件信息 :param Disk: 文件系统磁盘 :param target_file:欲删除的文件 \"\"\" for", "parent_node.child_nodes)) if name in child_node_names: return parent_node.child_nodes[child_node_names.index(name)] else: for child_node in parent_node.child_nodes: if", "# TODO:异常处理 return 0 assert isinstance(target_file, UserFile) clearFileInDisk(target_file, Disk) file_table.remove(target_file) target_file.parent_node.child_nodes.remove(target_file) return 1", "Folder): \"\"\" 在不删除文件的情况下重定向文件路径,可以和其他操作组合实现复制和剪切等操作 :param root: 文件系统根节点 :param file_name:欲重定向的文件名称 :param target_folder_name:欲重定向的目标文件夹 :return:返回0表示无法找到对应文件,返回1表明重定向成功 \"\"\" target_file", "start_index else: start_index = -1 space_counter = 0 return -1 def writeDiskToTXT(): #", "-1 else: clearFileInDisk(target_file, Disk) target_file.data = content target_file.size = math.ceil(len(content) / 10) target_file.disk_position", "str, parent_folder, data, authority: FileAuthority = FileAuthority.Default): \"\"\" 文件数据结构 :param file_name:文件名 :param parent_folder:父节点文件夹,每个文件该属性必须有值", "clearFileInDisk(target, Disk) file_table.remove(target) target.parent_node.child_nodes.remove(target) return 1 elif IR[\"operator\"] == \"renameFile\": if IR[\"newName\"] in", "创建文件或文件夹 :param Disk: 文件系统磁盘 :param file_table: 文件表 :param is_folder:是否是文件夹 :param name:文件/文件夹名称 :param parent_folder:父文件夹对象", "else: return 0 def clearFileInDisk(target_file: UserFile, Disk: list): \"\"\" 在物理磁盘中删除文件信息 :param Disk: 文件系统磁盘", "self.folder_name = folder_name self.parent_node = parent_folder self.child_nodes = child_nodes def __str__(self): return self.folder_name", "data:文件数据 :return:文件/文件夹对象。若同路径存在重名返回-1,磁盘分配错误返回-2 \"\"\" if child_nodes is None: child_nodes = [] if is_folder: if", "new_name in child_node_names: print('新名称在同路径下冲突') return -1 else: folder_obj.folder_name = new_name return 1 def", "return 0 child_node_names = list(map(str, folder_obj.parent_node.child_nodes)) if new_name in child_node_names: print('新名称在同路径下冲突') return -1", "= path_node_list[1:] if len(path_node_list) < 1 or path_node_list[0] != 'root': return 0 #", "'default_folder_3', root_node, data=None, Disk=disk, file_table=f_table) creatFileOrFolder(False, 'test', default_folder_1, data='This is a file for", "or isinstance(target_file, Folder): print('文件不存在') # TODO:异常处理 return 0 assert isinstance(target_file, UserFile) clearFileInDisk(target_file, Disk)", "Folder): print('文件不存在') # TODO:异常处理 return 0 target_folder = findObjByName(target_folder_name, root) if target_folder is", "child_nodes=None): \"\"\" 创建文件或文件夹 :param Disk: 文件系统磁盘 :param file_table: 文件表 :param is_folder:是否是文件夹 :param name:文件/文件夹名称", "math.ceil(len(data) / 10) \"\"\" size是文件占据的磁盘空间,为了方便前端表示,单位为10字节(即磁盘一个块有10字节),一个英文字符一个字节,注意每次更改file内容时应当要更新该值 eg1. yes 有三个字符,占三个字节,除以10向上取整,因此该文件占据的磁盘空间为1 eg2. 
I am sure I", "-1 # 文件在磁盘中的位置 self.authority = authority def __str__(self): return self.file_name def contiguousAllocation(file_to_allocated: UserFile,", "1: # 单纯的查询文件目录树 if IR is None: return parent_node.child_nodes[child_node_names.index(path_node_list[i])] elif IR[\"operator\"] == \"createFile\":", ":param new_name:新名称 :return:0表示找不到文件,-1表示新名字重名,1表示改名成功 \"\"\" file_obj = findObjByName(old_name, root) if file_obj is None: print('找不到该文件')", "= findObjByName(file_name, root) if target_file is None or isinstance(target_file, Folder): print('文件不存在') # TODO:异常处理", "return 0 def clearFileInDisk(target_file: UserFile, Disk: list): \"\"\" 在物理磁盘中删除文件信息 :param Disk: 文件系统磁盘 :param", "path = getPath(True, target_folder=parent_node) + '/' + path_now return path \"\"\" 路径的格式为: /root/aaa/w", "file_table: list): \"\"\" 通过文件id返回文件对象 该函数常用于磁盘索引文件,因为在本项目中磁盘仅需读取文件标识符(id)即可找到文件对象 :param file_table: 文件表 :param file_id:文件标识符 :return:文件对象。若返回-1表明没有找到对应标识符的文件 \"\"\" for", "return 0 assert isinstance(target_file, UserFile) assert isinstance(target_folder, Folder) target_file.parent_node.child_nodes.remove(target_file) target_file.parent_node = target_folder target_folder.child_nodes.append(target_file)", "# 每次都会更新子节点们 child_node_names = list(map(str, parent_node.child_nodes)) for i in range(1, len(path_node_list)): if i", "= data self.size = math.ceil(len(data) / 10) \"\"\" size是文件占据的磁盘空间,为了方便前端表示,单位为10字节(即磁盘一个块有10字节),一个英文字符一个字节,注意每次更改file内容时应当要更新该值 eg1. yes 有三个字符,占三个字节,除以10向上取整,因此该文件占据的磁盘空间为1 eg2.", "10) \"\"\" size是文件占据的磁盘空间,为了方便前端表示,单位为10字节(即磁盘一个块有10字节),一个英文字符一个字节,注意每次更改file内容时应当要更新该值 eg1. yes 有三个字符,占三个字节,除以10向上取整,因此该文件占据的磁盘空间为1 eg2. I am sure I am very", "data=None, Disk=disk, file_table=f_table) creatFileOrFolder(False, 'test', default_folder_1, data='This is a file for test', Disk=disk,", "if isinstance(child_node, Folder): result = findObjByName(name, child_node) if result is not None: return", "== FileAuthority.WriteOnly: return -1 # 读数据 else: return target.data # 写文件 elif IR[\"operator\"]", "str(node) == name and isinstance(node, Folder): return -1 new_folder = Folder(name, parent_folder, child_nodes)", "= 5 class FileAuthority(Enum): Default = 0 ReadOnly = 1 WriteOnly = 2", "space_counter += 1 if space_counter >= file_to_allocated.size: for j in range(start_index, start_index +", "__init__(self, file_name: str, parent_folder, data, authority: FileAuthority = FileAuthority.Default): \"\"\" 文件数据结构 :param file_name:文件名", "= [] if is_folder: if parent_folder is not None: for node in parent_folder.child_nodes:", "def __str__(self): return self.folder_name class UserFile: def __init__(self, file_name: str, parent_folder, data, authority:", "UserFile, Disk: list): \"\"\" 磁盘文件连续分配 :param Disk: 文件系统磁盘 :param file_to_allocated:需要分配磁盘空间的文件 :return:返回文件所存储的磁盘位置(起始下标),若返回-1说明磁盘空间不足 \"\"\" start_index", "parent_node): \"\"\" 利用递归,查找除了root文件夹以外的文件系统对象 :param name:文件/文件夹名称 :param parent_node:该参数用于递归,调用时必须传入root文件系统节点 :return:None表示没有该对象,否则返回文件系统对象 \"\"\" if not parent_node.child_nodes: return", "else: for child_node in parent_node.child_nodes: if isinstance(child_node, Folder): result = findObjByName(name, child_node) if", "str(node) == name and isinstance(node, UserFile): # 同路径重名 return -1 new_file = UserFile(name,", "def renameFile(old_name: str, new_name: str, root: Folder): \"\"\" 重命名文件 :param root: 文件系统根节点 :param", "list(map(str, parent_node.child_nodes)) if name in child_node_names: return parent_node.child_nodes[child_node_names.index(name)] else: for child_node in parent_node.child_nodes:", "data, authority: 
FileAuthority = FileAuthority.Default): \"\"\" 文件数据结构 :param file_name:文件名 :param parent_folder:父节点文件夹,每个文件该属性必须有值 :param data:文件数据", "Disk=disk, file_table=f_table) default_folder_2 = creatFileOrFolder(True, 'default_folder_2', root_node, data=None, Disk=disk, file_table=f_table) default_folder_3 = creatFileOrFolder(True,", "not None: for node in parent_folder.child_nodes: if str(node) == name and isinstance(node, Folder):", "if not path_node_list[i] in child_node_names: return 0 target = parent_node.child_nodes[child_node_names.index(path_node_list[i])] # 读文件 if", "am sure I am very handsome. 有29个字符(标点空格都算),因此该文件占据的磁盘空间为3 \"\"\" self.disk_position = -1 # 文件在磁盘中的位置", "\"\"\" 文件系统初始化 :param DiskSize:文件系统磁盘大小 :param state:状态标志 :return:状态标志,文件根节点,文件系统磁盘,文件表 \"\"\" disk = [-1 for _", "child_node_names: print('新名称在同路径下冲突') return -1 else: target.folder_name = IR[\"newName\"] return 1 elif path_node_list[i] in", "or path_node_list[0] != 'root': return 0 # 从root出发 parent_node = root # 每次都会更新子节点们", "= getPath(True, target_folder=parent_node) + '/' + path_now return path \"\"\" 路径的格式为: /root/aaa/w 以上路径表示root文件夹下的aaa文件夹的名为w的文件/文件夹", "eg2. I am sure I am very handsome. 有29个字符(标点空格都算),因此该文件占据的磁盘空间为3 \"\"\" self.disk_position = -1", "file_table: 文件表 :param file_name:文件名 :return:返回0表示无法找到对应文件,返回1表明删除成功 \"\"\" target_file = findObjByName(file_name, root) if target_file is", "I am very handsome. 有29个字符(标点空格都算),因此该文件占据的磁盘空间为3 \"\"\" self.disk_position = -1 # 文件在磁盘中的位置 self.authority =", "child_node_names = list(map(str, file_obj.parent_node.child_nodes)) if new_name in child_node_names: print('新名称在同路径下冲突') return -1 else: file_obj.file_name", "Rename = 3 Delete = 4 Redirect = 5 class FileAuthority(Enum): Default =", "== len(path_node_list) - 1: # 单纯的查询文件目录树 if IR is None: return parent_node.child_nodes[child_node_names.index(path_node_list[i])] elif", "bool, name: str, parent_folder: Folder, file_table: list, Disk: list, data, child_nodes=None): \"\"\" 创建文件或文件夹", "== -1: if start_index == -1: start_index = i space_counter += 1 if", "len(path_node_list) - 1: # 单纯的查询文件目录树 if IR is None: return parent_node.child_nodes[child_node_names.index(path_node_list[i])] elif IR[\"operator\"]", ">= file_to_allocated.size: for j in range(start_index, start_index + file_to_allocated.size): Disk[j] = file_to_allocated.id return", "= parent_node.child_nodes[child_node_names.index(path_node_list[i])] # 读文件 if IR[\"operator\"] == \"readFile\": # 权限不够 if target.authority ==", "10) target.disk_position = contiguousAllocation(target, Disk) return 1 elif IR[\"operator\"] == \"delFile\": if isinstance(target,", "else: folder_obj.folder_name = new_name return 1 def renameFile(old_name: str, new_name: str, root: Folder):", "root: 文件系统根节点 :param file_name:文件名称 :return:返回文件数据字符串,若返回0表明文件不存在,返回-1表明文件权限不足 \"\"\" target_file = findObjByName(file_name, root) if target_file is", "str, target_folder_name: str, root: Folder): \"\"\" 在不删除文件的情况下重定向文件路径,可以和其他操作组合实现复制和剪切等操作 :param root: 文件系统根节点 :param file_name:欲重定向的文件名称 :param", "return -1 new_folder = Folder(name, parent_folder, child_nodes) if not name == 'root': parent_folder.child_nodes.append(new_folder)", "path.split('/') if path_node_list[0] == \"\": path_node_list = path_node_list[1:] if len(path_node_list) < 1 or", "new_name:新名称 :return:0表示找不到文件夹,-1表示新名字重名,1表示改名成功 \"\"\" folder_obj = findObjByName(old_name, root) if folder_obj is None: print('找不到该文件夹') #", "if is_folder: if parent_folder is not None: for node in parent_folder.child_nodes: if str(node)", "default_folder_2, 
default_folder_3] return state, root_node, disk, f_table def FileTree(parent_node): # 是目录 if isinstance(parent_node,", "1 def redirectFile(file_name: str, target_folder_name: str, root: Folder): \"\"\" 在不删除文件的情况下重定向文件路径,可以和其他操作组合实现复制和剪切等操作 :param root: 文件系统根节点", "主要思想是,找到磁盘中连续且符合文件大小的几个块,且从磁盘头部遍历查找,这样有利于减少外部碎片 if Disk[i] == -1: if start_index == -1: start_index = i space_counter", "content:新内容 :return:返回1表示成功写入,返回0表示写入失败,返回-1表示文件没有写权限 \"\"\" target_file = findObjByName(file_name, root) if target_file is None or isinstance(target_file,", "file_table: list, Disk: list, data, child_nodes=None): \"\"\" 创建文件或文件夹 :param Disk: 文件系统磁盘 :param file_table:", "class FileOperation(Enum): Read = 0 Write = 1 Create = 2 Rename =", "in parent_folder.child_nodes: if str(node) == name and isinstance(node, Folder): return -1 new_folder =", "IR[\"operator\"] == \"createFolder\": return creatFileOrFolder(True, path_node_list[i], parent_node, data=None, Disk=Disk, file_table=file_table) else: # 不存在问题", "if f.id == file_id: return f return -1 def findObjByName(name: str, parent_node): \"\"\"", "not is_folder: path_now = target_file.file_name parent_node = target_file.parent_node else: path_now = target_folder.folder_name parent_node", ":param root: 文件系统根节点 :param file_name:欲重定向的文件名称 :param target_folder_name:欲重定向的目标文件夹 :return:返回0表示无法找到对应文件,返回1表明重定向成功 \"\"\" target_file = findObjByName(file_name, root)", "def creatFileOrFolder(is_folder: bool, name: str, parent_folder: Folder, file_table: list, Disk: list, data, child_nodes=None):", "None or isinstance(target_file, Folder): print('文件不存在') # TODO:异常处理 return 0 target_folder = findObjByName(target_folder_name, root)", "TODO:异常处理 return 0 assert isinstance(target_file, UserFile) assert isinstance(target_folder, Folder) target_file.parent_node.child_nodes.remove(target_file) target_file.parent_node = target_folder", "for node in parent_folder.child_nodes: if str(node) == name and isinstance(node, Folder): return -1", "elif IR[\"operator\"] == \"writeFile\": # 权限不够 if target.authority == FileAuthority.ReadOnly: return -1 #", "[] # 文件表,存储所有已经建立的文件 root_node = creatFileOrFolder(True, 'root', None, data=None, Disk=disk, file_table=f_table) if not", ":param root: 文件系统根节点 :param old_name:旧名称 :param new_name:新名称 :return:0表示找不到文件夹,-1表示新名字重名,1表示改名成功 \"\"\" folder_obj = findObjByName(old_name, root)", "if new_file.disk_position == -1: print('磁盘空间分配错误') # TODO:异常处理 return -2 parent_folder.child_nodes.append(new_file) return new_file def", "-2 parent_folder.child_nodes.append(new_file) return new_file def getPath(is_folder: bool, target_folder: Folder = None, target_file: UserFile", "路径的格式为: /root/aaa/w 以上路径表示root文件夹下的aaa文件夹的名为w的文件/文件夹 \"\"\" def pathToObj(path: str, IR: dict, file_table: list, Disk: list,", "= creatFileOrFolder(True, 'default_folder_1', root_node, data=None, Disk=disk, file_table=f_table) default_folder_2 = creatFileOrFolder(True, 'default_folder_2', root_node, data=None,", "= authority def __str__(self): return self.file_name def contiguousAllocation(file_to_allocated: UserFile, Disk: list): \"\"\" 磁盘文件连续分配", "root: 文件系统根节点 :param file_table: 文件表 :param file_name:文件名 :return:返回0表示无法找到对应文件,返回1表明删除成功 \"\"\" target_file = findObjByName(file_name, root)", "return -1 else: target.file_name = IR[\"newName\"] return 1 elif IR[\"operator\"] == \"renameFolder\": if", "= [] child_nodes = list(parent_node.child_nodes) for child in child_nodes: data.append(FileTree(child)) return {parent_node.__str__(): data}", "target.folder_name = IR[\"newName\"] return 1 elif 
path_node_list[i] in child_node_names: parent_node = parent_node.child_nodes[child_node_names.index(path_node_list[i])] child_node_names", "return 0 target_folder = findObjByName(target_folder_name, root) if target_folder is None or isinstance(target_folder, UserFile):", "Folder): print('文件不存在') # TODO:异常处理 return 0 assert isinstance(target_file, UserFile) if target_file.authority == FileAuthority.ReadOnly:", "content: str, root: Folder, Disk: list): \"\"\" 写文件内容(原先内容会删除) :param Disk: 文件系统磁盘 :param root:", "target_file is None or isinstance(target_file, Folder): print('文件不存在') # TODO:异常处理 return 0 target_folder =", "target_file.disk_position = contiguousAllocation(target_file, Disk) return 1 def readFile(file_name: str, root: Folder): \"\"\" 读取文件", ":param Disk: 文件系统磁盘 :param file_table: 文件表 :param IR: 直接执行指令 :param path:文件字符串 :return:文件/文件夹对象。若查找错误,返回0 \"\"\"", "Folder) target_file.parent_node.child_nodes.remove(target_file) target_file.parent_node = target_folder target_folder.child_nodes.append(target_file) return 1 def initFileSystem(DiskSize: int = 1000,", "in range(DiskSize)] # 磁盘,存储文件的id f_table = [] # 文件表,存储所有已经建立的文件 root_node = creatFileOrFolder(True, 'root',", "root_node, disk, f_table def FileTree(parent_node): # 是目录 if isinstance(parent_node, Folder): data = []", "return 0 target = parent_node.child_nodes[child_node_names.index(path_node_list[i])] # 读文件 if IR[\"operator\"] == \"readFile\": # 权限不够", "= [-1 for _ in range(DiskSize)] # 磁盘,存储文件的id f_table = [] # 文件表,存储所有已经建立的文件", "print('文件夹不存在') # TODO:异常处理 return 0 assert isinstance(target_file, UserFile) assert isinstance(target_folder, Folder) target_file.parent_node.child_nodes.remove(target_file) target_file.parent_node", "Disk[i] == -1: if start_index == -1: start_index = i space_counter += 1", "== -1: start_index = i space_counter += 1 if space_counter >= file_to_allocated.size: for", "if space_counter >= file_to_allocated.size: for j in range(start_index, start_index + file_to_allocated.size): Disk[j] =", "contiguousAllocation(target_file, Disk) return 1 def readFile(file_name: str, root: Folder): \"\"\" 读取文件 :param root:", "\"\"\" 通过文件id返回文件对象 该函数常用于磁盘索引文件,因为在本项目中磁盘仅需读取文件标识符(id)即可找到文件对象 :param file_table: 文件表 :param file_id:文件标识符 :return:文件对象。若返回-1表明没有找到对应标识符的文件 \"\"\" for f in", "from enum import Enum from kernal import Tool class FileOperation(Enum): Read = 0", "isinstance(target_file, UserFile) assert isinstance(target_folder, Folder) target_file.parent_node.child_nodes.remove(target_file) target_file.parent_node = target_folder target_folder.child_nodes.append(target_file) return 1 def", "\"\": path_node_list = path_node_list[1:] if len(path_node_list) < 1 or path_node_list[0] != 'root': return", "contiguousAllocation(file_to_allocated: UserFile, Disk: list): \"\"\" 磁盘文件连续分配 :param Disk: 文件系统磁盘 :param file_to_allocated:需要分配磁盘空间的文件 :return:返回文件所存储的磁盘位置(起始下标),若返回-1说明磁盘空间不足 \"\"\"", "-1 new_folder = Folder(name, parent_folder, child_nodes) if not name == 'root': parent_folder.child_nodes.append(new_folder) return", ":param name:文件/文件夹名称 :param parent_node:该参数用于递归,调用时必须传入root文件系统节点 :return:None表示没有该对象,否则返回文件系统对象 \"\"\" if not parent_node.child_nodes: return None child_node_names =", "if result is not None: return result return None def renameFolder(old_name: str, new_name:", "node in parent_folder.child_nodes: if str(node) == name and isinstance(node, UserFile): # 同路径重名 return", "file_to_allocated.id return start_index else: start_index = -1 space_counter = 0 return -1 def", "elif IR[\"operator\"] == \"delFile\": if 
isinstance(target, Folder): return 0 else: clearFileInDisk(target, Disk) file_table.remove(target)", "print('文件不存在') # TODO:异常处理 return 0 assert isinstance(target_file, UserFile) if target_file.authority == FileAuthority.WriteOnly: print('文件权限不足')", "None: for node in parent_folder.child_nodes: if str(node) == name and isinstance(node, Folder): return", "目录物理结构:连续形 TODO:磁盘外部碎片如何处理? TODO:磁盘IO添加中断 \"\"\" import math from enum import Enum from kernal import", "\"\"\" import math from enum import Enum from kernal import Tool class FileOperation(Enum):", "path_node_list = path_node_list[1:] if len(path_node_list) < 1 or path_node_list[0] != 'root': return 0", "== \"createFolder\": return creatFileOrFolder(True, path_node_list[i], parent_node, data=None, Disk=Disk, file_table=file_table) else: # 不存在问题 if", "FileAuthority.Default): \"\"\" 文件数据结构 :param file_name:文件名 :param parent_folder:父节点文件夹,每个文件该属性必须有值 :param data:文件数据 :param authority:文件权限 \"\"\" self.id", "target_file is None or isinstance(target_file, Folder): print('文件不存在') # TODO:异常处理 return 0 assert isinstance(target_file,", ":param parent_folder:父文件夹对象 :param child_nodes:文件夹内容 :param data:文件数据 :return:文件/文件夹对象。若同路径存在重名返回-1,磁盘分配错误返回-2 \"\"\" if child_nodes is None: child_nodes", "path_now = target_file.file_name parent_node = target_file.parent_node else: path_now = target_folder.folder_name parent_node = target_folder.parent_node", "path \"\"\" 路径的格式为: /root/aaa/w 以上路径表示root文件夹下的aaa文件夹的名为w的文件/文件夹 \"\"\" def pathToObj(path: str, IR: dict, file_table: list,", "for test', Disk=disk, file_table=f_table) root_node.child_nodes = [default_folder_1, default_folder_2, default_folder_3] return state, root_node, disk,", ":param child_nodes:文件夹内容 :param data:文件数据 :return:文件/文件夹对象。若同路径存在重名返回-1,磁盘分配错误返回-2 \"\"\" if child_nodes is None: child_nodes = []", "node in parent_folder.child_nodes: if str(node) == name and isinstance(node, Folder): return -1 new_folder", "IR[\"operator\"] == \"readFile\": # 权限不够 if target.authority == FileAuthority.WriteOnly: return -1 # 读数据", "0 target = parent_node.child_nodes[child_node_names.index(path_node_list[i])] # 读文件 if IR[\"operator\"] == \"readFile\": # 权限不够 if", "default_folder_3 = creatFileOrFolder(True, 'default_folder_3', root_node, data=None, Disk=disk, file_table=f_table) creatFileOrFolder(False, 'test', default_folder_1, data='This is", "== \"readFile\": # 权限不够 if target.authority == FileAuthority.WriteOnly: return -1 # 读数据 else:", "file_table: 文件表 :param file_id:文件标识符 :return:文件对象。若返回-1表明没有找到对应标识符的文件 \"\"\" for f in file_table: if f.id ==", "folder_obj.folder_name = new_name return 1 def renameFile(old_name: str, new_name: str, root: Folder): \"\"\"", "= findObjByName(old_name, root) if folder_obj is None: print('找不到该文件夹') # TODO:异常处理 return 0 child_node_names", "# TODO:异常处理 return -1 else: return target_file.data def delFile(file_name: str, file_table: list, root:", "# 不存在问题 if not path_node_list[i] in child_node_names: return 0 target = parent_node.child_nodes[child_node_names.index(path_node_list[i])] #", "读取文件 :param root: 文件系统根节点 :param file_name:文件名称 :return:返回文件数据字符串,若返回0表明文件不存在,返回-1表明文件权限不足 \"\"\" target_file = findObjByName(file_name, root) if", "path_node_list[i], parent_node, data=IR['content'], Disk=Disk, file_table=file_table) elif IR[\"operator\"] == \"createFolder\": return creatFileOrFolder(True, path_node_list[i], parent_node,", "= None): \"\"\" 利用递归获取文件/文件夹的路径 :param is_folder:欲获取路径的对象是否是文件夹 :param target_folder:目标文件夹 :param target_file:目标文件 :return:目标对象的路径 \"\"\" if", "return 
target_file.data def delFile(file_name: str, file_table: list, root: Folder, Disk: list): \"\"\" 彻底删除文件,包括磁盘和文件表的记录", "文件系统磁盘 :param file_table: 文件表 :param is_folder:是否是文件夹 :param name:文件/文件夹名称 :param parent_folder:父文件夹对象 :param child_nodes:文件夹内容 :param", "new_name in child_node_names: print('新名称在同路径下冲突') return -1 else: file_obj.file_name = new_name return 1 def", "file_name:文件名称 :return:返回文件数据字符串,若返回0表明文件不存在,返回-1表明文件权限不足 \"\"\" target_file = findObjByName(file_name, root) if target_file is None or isinstance(target_file,", "if child_nodes is None: child_nodes = [] if is_folder: if parent_folder is not", "return f return -1 def findObjByName(name: str, parent_node): \"\"\" 利用递归,查找除了root文件夹以外的文件系统对象 :param name:文件/文件夹名称 :param", "__str__(self): return self.folder_name class UserFile: def __init__(self, file_name: str, parent_folder, data, authority: FileAuthority", "list(map(str, parent_node.child_nodes)) for i in range(1, len(path_node_list)): if i == len(path_node_list) - 1:", "Disk[j] = file_to_allocated.id return start_index else: start_index = -1 space_counter = 0 return", ":param new_name:新名称 :return:0表示找不到文件夹,-1表示新名字重名,1表示改名成功 \"\"\" folder_obj = findObjByName(old_name, root) if folder_obj is None: print('找不到该文件夹')", "UserFile = None): \"\"\" 利用递归获取文件/文件夹的路径 :param is_folder:欲获取路径的对象是否是文件夹 :param target_folder:目标文件夹 :param target_file:目标文件 :return:目标对象的路径 \"\"\"", "= new_name return 1 def renameFile(old_name: str, new_name: str, root: Folder): \"\"\" 重命名文件", "# TODO:异常处理 return 0 child_node_names = list(map(str, file_obj.parent_node.child_nodes)) if new_name in child_node_names: print('新名称在同路径下冲突')", "Disk: list): \"\"\" 在物理磁盘中删除文件信息 :param Disk: 文件系统磁盘 :param target_file:欲删除的文件 \"\"\" for i in", "creatFileOrFolder(True, 'default_folder_1', root_node, data=None, Disk=disk, file_table=f_table) default_folder_2 = creatFileOrFolder(True, 'default_folder_2', root_node, data=None, Disk=disk,", "self.file_name = file_name self.parent_node = parent_folder self.data = data self.size = math.ceil(len(data) /", "TODO:磁盘外部碎片如何处理? 
<filename>kernal/FileSystem.py<gh_stars>1-10
"""
Directory logical structure: tree.
Directory physical structure: contiguous allocation.
TODO: how should external disk fragmentation be handled?
TODO: add interrupt handling for disk I/O.
"""
import math
from enum import Enum

from kernal import Tool


class FileOperation(Enum):
    Read = 0
    Write = 1
    Create = 2
    Rename = 3
    Delete = 4
    Redirect = 5


class FileAuthority(Enum):
    Default = 0
    ReadOnly = 1
    WriteOnly = 2


class Folder:
    def __init__(self, folder_name: str, parent_folder, child_nodes: list):
        """
        Folder data structure. Note that a folder is a purely logical
        structure, so it occupies no physical disk space.
        :param folder_name: folder name
        :param parent_folder: parent node. Every parent is a folder; the root
                              folder has no parent, so this attribute is None.
        :param child_nodes: child nodes. There may be several, and each may be
                            a folder or a file.
        """
        self.id = Tool.uniqueNum()
        self.folder_name = folder_name
        self.parent_node = parent_folder
        self.child_nodes = child_nodes

    def __str__(self):
        return self.folder_name


class UserFile:
    def __init__(self, file_name: str, parent_folder, data, authority: FileAuthority = FileAuthority.Default):
        """
        File data structure.
        :param file_name: file name
        :param parent_folder: parent folder; every file must have one
        :param data: file data
        :param authority: file access permission
        """
        self.id = Tool.uniqueNum()
        self.file_name = file_name
        self.parent_node = parent_folder
        self.data = data
        self.size = math.ceil(len(data) / 10)
        """
        size is the disk space the file occupies. For easier display in the
        front end the unit is 10 bytes (i.e. one disk block holds 10 bytes),
        and one English character is one byte. Update this value whenever the
        file content changes.
        eg1. "yes" has 3 characters, i.e. 3 bytes; divided by 10 and rounded
             up, the file occupies 1 block.
        eg2. "I am sure I am very handsome." has 29 characters (punctuation
             and spaces included), so the file occupies 3 blocks.
        """
        self.disk_position = -1  # position of the file on the disk
        self.authority = authority

    def __str__(self):
        return self.file_name
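
# Illustrative sketch (not part of the original module): `_block_size` is a
# hypothetical helper that mirrors the `math.ceil(len(data) / 10)` rule used
# by UserFile above, so the docstring examples can be checked directly.
def _block_size(data: str) -> int:
    """Return the number of 10-byte blocks `data` would occupy."""
    return math.ceil(len(data) / 10)

# Expected values, matching the docstring examples:
#   _block_size("yes") == 1
#   _block_size("I am sure I am very handsome.") == 3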
def contiguousAllocation(file_to_allocated: UserFile, Disk: list):
    """
    Contiguous disk allocation for a file.
    :param Disk: file-system disk
    :param file_to_allocated: file that needs disk space
    :return: the disk position (start index) where the file is stored;
             -1 means there is not enough disk space
    """
    start_index = -1
    space_counter = 0
    for i in range(len(Disk)):
        # Core idea: scan from the head of the disk for a run of free blocks
        # large enough for the file; scanning from the front helps reduce
        # external fragmentation.
        if Disk[i] == -1:
            if start_index == -1:
                start_index = i
            space_counter += 1
            if space_counter >= file_to_allocated.size:
                for j in range(start_index, start_index + file_to_allocated.size):
                    Disk[j] = file_to_allocated.id
                return start_index
        else:
            start_index = -1
            space_counter = 0
    return -1


def writeDiskToTXT():
    # TODO: dump the disk contents to a TXT file?
    pass


def creatFileOrFolder(is_folder: bool, name: str, parent_folder: Folder, file_table: list, Disk: list, data, child_nodes=None):
    """
    Create a file or a folder.
    :param Disk: file-system disk
    :param file_table: file table
    :param is_folder: whether the object to create is a folder
    :param name: file/folder name
    :param parent_folder: parent folder object
    :param child_nodes: folder contents
    :param data: file data
    :return: the file/folder object; -1 if the name clashes on the same path,
             -2 on a disk-allocation error
    """
    if child_nodes is None:
        child_nodes = []
    if is_folder:
        if parent_folder is not None:
            for node in parent_folder.child_nodes:
                if str(node) == name and isinstance(node, Folder):
                    return -1
        new_folder = Folder(name, parent_folder, child_nodes)
        if not name == 'root':
            parent_folder.child_nodes.append(new_folder)
        return new_folder
    else:
        for node in parent_folder.child_nodes:
            if str(node) == name and isinstance(node, UserFile):
                # name clash on the same path
                return -1
        new_file = UserFile(name, parent_folder, data)
        file_table.append(new_file)
        new_file.disk_position = contiguousAllocation(new_file, Disk)
        if new_file.disk_position == -1:
            print('disk allocation error')
            # TODO: exception handling
            return -2
        parent_folder.child_nodes.append(new_file)
        return new_file
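
# Illustrative sketch (not part of the original module): how
# contiguousAllocation behaves on a tiny 10-block disk. `_Stub` and the sizes
# below are made up for the demonstration; it only needs `.id` and `.size`.
def _allocation_demo():
    class _Stub:  # minimal stand-in for UserFile
        def __init__(self, id_, size):
            self.id = id_
            self.size = size

    disk = [-1] * 10
    a = _Stub(id_=1, size=3)
    b = _Stub(id_=2, size=4)
    print(contiguousAllocation(a, disk))  # 0 -> blocks 0..2 now hold id 1
    print(contiguousAllocation(b, disk))  # 3 -> blocks 3..6 now hold id 2
    print(disk)                           # [1, 1, 1, 2, 2, 2, 2, -1, -1, -1]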
def getPath(is_folder: bool, target_folder: Folder = None, target_file: UserFile = None):
    """
    Build the path of a file/folder recursively.
    :param is_folder: whether the target object is a folder
    :param target_folder: target folder
    :param target_file: target file
    :return: path of the target object
    """
    if is_folder and target_folder.folder_name == 'root':
        return '/root'
    if not is_folder:
        path_now = target_file.file_name
        parent_node = target_file.parent_node
    else:
        path_now = target_folder.folder_name
        parent_node = target_folder.parent_node
    path = getPath(True, target_folder=parent_node) + '/' + path_now
    return path


"""
Path format:
/root/aaa/w
The path above denotes the file/folder named w inside folder aaa under the
root folder.
"""


def pathToObj(path: str, IR: dict, file_table: list, Disk: list, root: Folder):
    """
    Resolve a path to a file/folder.
    :param root: file-system root node
    :param Disk: file-system disk
    :param file_table: file table
    :param IR: instruction to execute directly
    :param path: path string
    :return: the file/folder object; 0 on a lookup error
    """
    path = path.replace(" ", "")
    path_node_list = path.split('/')
    if path_node_list[0] == "":
        path_node_list = path_node_list[1:]
    if len(path_node_list) < 1 or path_node_list[0] != 'root':
        return 0
    # start from root
    parent_node = root
    # refreshed on every step down the tree
    child_node_names = list(map(str, parent_node.child_nodes))
    for i in range(1, len(path_node_list)):
        if i == len(path_node_list) - 1:
            # plain lookup in the directory tree
            if IR is None:
                return parent_node.child_nodes[child_node_names.index(path_node_list[i])]
            elif IR["operator"] == "createFile":
                return creatFileOrFolder(False, path_node_list[i], parent_node, data=IR['content'], Disk=Disk, file_table=file_table)
            elif IR["operator"] == "createFolder":
                return creatFileOrFolder(True, path_node_list[i], parent_node, data=None, Disk=Disk, file_table=file_table)
            else:
                # the target must already exist for the remaining operations
                if not path_node_list[i] in child_node_names:
                    return 0
                target = parent_node.child_nodes[child_node_names.index(path_node_list[i])]
                # read a file
                if IR["operator"] == "readFile":
                    # insufficient permission
                    if target.authority == FileAuthority.WriteOnly:
                        return -1
                    # read the data
                    else:
                        return target.data
                # write a file
                elif IR["operator"] == "writeFile":
                    # insufficient permission
                    if target.authority == FileAuthority.ReadOnly:
                        return -1
                    # write the data
                    else:
                        clearFileInDisk(target, Disk)
                        target.data = IR["content"]
                        target.size = math.ceil(len(IR["content"]) / 10)
                        target.disk_position = contiguousAllocation(target, Disk)
                        return 1
                elif IR["operator"] == "delFile":
                    if isinstance(target, Folder):
                        return 0
                    else:
                        clearFileInDisk(target, Disk)
                        file_table.remove(target)
                        target.parent_node.child_nodes.remove(target)
                        return 1
                elif IR["operator"] == "renameFile":
                    if IR["newName"] in child_node_names:
                        print('new name clashes on the same path')
                        return -1
                    else:
                        target.file_name = IR["newName"]
                        return 1
                elif IR["operator"] == "renameFolder":
                    if IR["newName"] in child_node_names:
                        print('new name clashes on the same path')
                        return -1
                    else:
                        target.folder_name = IR["newName"]
                        return 1
        elif path_node_list[i] in child_node_names:
            parent_node = parent_node.child_nodes[child_node_names.index(path_node_list[i])]
            child_node_names = list(map(str, parent_node.child_nodes))
        else:
            return 0
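
# Illustrative sketch (not part of the original module): the IR dict drives
# pathToObj. The dict shapes below follow the branches handled above; the
# path and file name 'notes' are made up for the demonstration.
def _path_demo(root, disk, f_table):
    # create /root/default_folder_1/notes, overwrite it, then read it back
    pathToObj('/root/default_folder_1/notes',
              {"operator": "createFile", "content": "hello"},
              f_table, disk, root)
    pathToObj('/root/default_folder_1/notes',
              {"operator": "writeFile", "content": "hello again"},
              f_table, disk, root)
    return pathToObj('/root/default_folder_1/notes',
                     {"operator": "readFile"},
                     f_table, disk, root)  # -> "hello again"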
def clearFileInDisk(target_file: UserFile, Disk: list):
    """
    Remove a file's records from the physical disk.
    :param Disk: file-system disk
    :param target_file: file to delete
    """
    for i in range(target_file.disk_position, target_file.disk_position + target_file.size):
        Disk[i] = -1


def findFileById(file_id: int, file_table: list):
    """
    Return a file object by its id.
    Commonly used when indexing files from the disk: in this project the disk
    only stores file identifiers (ids), which is enough to locate the object.
    :param file_table: file table
    :param file_id: file identifier
    :return: the file object; -1 means no file with that identifier was found
    """
    for f in file_table:
        if f.id == file_id:
            return f
    return -1


def findObjByName(name: str, parent_node):
    """
    Recursively look up any file-system object other than the root folder.
    :param name: file/folder name
    :param parent_node: used for the recursion; pass the root node when calling
    :return: None if no such object exists, otherwise the object
    """
    if not parent_node.child_nodes:
        return None
    child_node_names = list(map(str, parent_node.child_nodes))
    if name in child_node_names:
        return parent_node.child_nodes[child_node_names.index(name)]
    else:
        for child_node in parent_node.child_nodes:
            if isinstance(child_node, Folder):
                result = findObjByName(name, child_node)
                if result is not None:
                    return result
        return None


def renameFolder(old_name: str, new_name: str, root: Folder):
    """
    Rename a folder.
    :param root: file-system root node
    :param old_name: old name
    :param new_name: new name
    :return: 0 if the folder cannot be found, -1 if the new name clashes,
             1 on success
    """
    folder_obj = findObjByName(old_name, root)
    if folder_obj is None:
        print('folder not found')
        # TODO: exception handling
        return 0
    child_node_names = list(map(str, folder_obj.parent_node.child_nodes))
    if new_name in child_node_names:
        print('new name clashes on the same path')
        return -1
    else:
        folder_obj.folder_name = new_name
        return 1


def renameFile(old_name: str, new_name: str, root: Folder):
    """
    Rename a file.
    :param root: file-system root node
    :param old_name: old name
    :param new_name: new name
    :return: 0 if the file cannot be found, -1 if the new name clashes,
             1 on success
    """
    file_obj = findObjByName(old_name, root)
    if file_obj is None:
        print('file not found')
        # TODO: exception handling
        return 0
    child_node_names = list(map(str, file_obj.parent_node.child_nodes))
    if new_name in child_node_names:
        print('new name clashes on the same path')
        return -1
    else:
        file_obj.file_name = new_name
        return 1
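
# Illustrative sketch (not part of the original module): name lookup is
# depth-first, so the first match wins when several objects share a name.
# 'test' refers to the default file set up by initFileSystem below.
def _lookup_demo(root):
    f = findObjByName('test', root)
    return None if f is None else getPath(False, target_file=f)
    # -> '/root/default_folder_1/test'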
def writeFile(file_name: str, content: str, root: Folder, Disk: list):
    """
    Write file content (the previous content is discarded).
    :param Disk: file-system disk
    :param root: file-system root node
    :param file_name: file name
    :param content: new content
    :return: 1 on a successful write, 0 on failure, -1 if the file is not
             writable
    """
    target_file = findObjByName(file_name, root)
    if target_file is None or isinstance(target_file, Folder):
        print('file does not exist')
        # TODO: exception handling
        return 0
    assert isinstance(target_file, UserFile)
    if target_file.authority == FileAuthority.ReadOnly:
        print('insufficient file permission')
        # TODO: exception handling
        return -1
    else:
        clearFileInDisk(target_file, Disk)
        target_file.data = content
        target_file.size = math.ceil(len(content) / 10)
        target_file.disk_position = contiguousAllocation(target_file, Disk)
        return 1


def readFile(file_name: str, root: Folder):
    """
    Read a file.
    :param root: file-system root node
    :param file_name: file name
    :return: the file data string; 0 means the file does not exist, -1 means
             insufficient file permission
    """
    target_file = findObjByName(file_name, root)
    if target_file is None or isinstance(target_file, Folder):
        print('file does not exist')
        # TODO: exception handling
        return 0
    assert isinstance(target_file, UserFile)
    if target_file.authority == FileAuthority.WriteOnly:
        print('insufficient file permission')
        # TODO: exception handling
        return -1
    else:
        return target_file.data


def delFile(file_name: str, file_table: list, root: Folder, Disk: list):
    """
    Delete a file completely, including its disk blocks and file-table record.
    :param Disk: file-system disk
    :param root: file-system root node
    :param file_table: file table
    :param file_name: file name
    :return: 0 if the file cannot be found, 1 on successful deletion
    """
    target_file = findObjByName(file_name, root)
    if target_file is None or isinstance(target_file, Folder):
        print('file does not exist')
        # TODO: exception handling
        return 0
    assert isinstance(target_file, UserFile)
    clearFileInDisk(target_file, Disk)
    file_table.remove(target_file)
    target_file.parent_node.child_nodes.remove(target_file)
    return 1


def redirectFile(file_name: str, target_folder_name: str, root: Folder):
    """
    Move a file to another folder without deleting it; combined with other
    operations this supports copy, cut and similar features.
    :param root: file-system root node
    :param file_name: name of the file to move
    :param target_folder_name: destination folder
    :return: 0 if the file or folder cannot be found, 1 on success
    """
    target_file = findObjByName(file_name, root)
    if target_file is None or isinstance(target_file, Folder):
        print('file does not exist')
        # TODO: exception handling
        return 0
    target_folder = findObjByName(target_folder_name, root)
    if target_folder is None or isinstance(target_folder, UserFile):
        print('folder does not exist')
        # TODO: exception handling
        return 0
    assert isinstance(target_file, UserFile)
    assert isinstance(target_folder, Folder)
    target_file.parent_node.child_nodes.remove(target_file)
    target_file.parent_node = target_folder
    target_folder.child_nodes.append(target_file)
    return 1
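
# Illustrative sketch (not part of the original module): redirect combined
# with getPath. Folder and file names refer to the defaults created by
# initFileSystem below.
def _redirect_demo(root):
    redirectFile('test', 'default_folder_2', root)
    return getPath(False, target_file=findObjByName('test', root))
    # -> '/root/default_folder_2/test'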
= list(map(str, file_obj.parent_node.child_nodes)) if new_name in child_node_names: print('新名称在同路径下冲突') return -1", "start_index + file_to_allocated.size): Disk[j] = file_to_allocated.id return start_index else: start_index = -1 space_counter", "IR[\"operator\"] == \"createFile\": return creatFileOrFolder(False, path_node_list[i], parent_node, data=IR['content'], Disk=Disk, file_table=file_table) elif IR[\"operator\"] ==", "in child_node_names: print('新名称在同路径下冲突') return -1 else: target.file_name = IR[\"newName\"] return 1 elif IR[\"operator\"]", "1000, state: bool = False): \"\"\" 文件系统初始化 :param DiskSize:文件系统磁盘大小 :param state:状态标志 :return:状态标志,文件根节点,文件系统磁盘,文件表 \"\"\"", "-1 # 写数据 else: clearFileInDisk(target, Disk) target.data = IR[\"content\"] target.size = math.ceil(len(IR[\"content\"]) /", "parent_folder self.data = data self.size = math.ceil(len(data) / 10) \"\"\" size是文件占据的磁盘空间,为了方便前端表示,单位为10字节(即磁盘一个块有10字节),一个英文字符一个字节,注意每次更改file内容时应当要更新该值 eg1. yes", "target_file.parent_node else: path_now = target_folder.folder_name parent_node = target_folder.parent_node path = getPath(True, target_folder=parent_node) +", "file_to_allocated.size: for j in range(start_index, start_index + file_to_allocated.size): Disk[j] = file_to_allocated.id return start_index", "= findObjByName(old_name, root) if file_obj is None: print('找不到该文件') # TODO:异常处理 return 0 child_node_names", "file_table: 文件表 :param is_folder:是否是文件夹 :param name:文件/文件夹名称 :param parent_folder:父文件夹对象 :param child_nodes:文件夹内容 :param data:文件数据 :return:文件/文件夹对象。若同路径存在重名返回-1,磁盘分配错误返回-2", "权限不够 if target.authority == FileAuthority.WriteOnly: return -1 # 读数据 else: return target.data #", "1 elif path_node_list[i] in child_node_names: parent_node = parent_node.child_nodes[child_node_names.index(path_node_list[i])] child_node_names = list(map(str, parent_node.child_nodes)) else:", "# 写数据 else: clearFileInDisk(target, Disk) target.data = IR[\"content\"] target.size = math.ceil(len(IR[\"content\"]) / 10)", "file_table=file_table) else: # 不存在问题 if not path_node_list[i] in child_node_names: return 0 target =", "file_name: str, parent_folder, data, authority: FileAuthority = FileAuthority.Default): \"\"\" 文件数据结构 :param file_name:文件名 :param", "+= 1 if space_counter >= file_to_allocated.size: for j in range(start_index, start_index + file_to_allocated.size):", "def readFile(file_name: str, root: Folder): \"\"\" 读取文件 :param root: 文件系统根节点 :param file_name:文件名称 :return:返回文件数据字符串,若返回0表明文件不存在,返回-1表明文件权限不足", ":param root: 文件系统根节点 :param file_name:文件名称 :return:返回文件数据字符串,若返回0表明文件不存在,返回-1表明文件权限不足 \"\"\" target_file = findObjByName(file_name, root) if target_file", "list, Disk: list, data, child_nodes=None): \"\"\" 创建文件或文件夹 :param Disk: 文件系统磁盘 :param file_table: 文件表", "文件系统根节点 :param file_name:文件名 :param content:新内容 :return:返回1表示成功写入,返回0表示写入失败,返回-1表示文件没有写权限 \"\"\" target_file = findObjByName(file_name, root) if target_file", "= -1 space_counter = 0 return -1 def writeDiskToTXT(): # TODO:把结果输出到TXT? 
pass def", "文件系统初始化 :param DiskSize:文件系统磁盘大小 :param state:状态标志 :return:状态标志,文件根节点,文件系统磁盘,文件表 \"\"\" disk = [-1 for _ in", "not path_node_list[i] in child_node_names: return 0 target = parent_node.child_nodes[child_node_names.index(path_node_list[i])] # 读文件 if IR[\"operator\"]", "child_node_names = list(map(str, folder_obj.parent_node.child_nodes)) if new_name in child_node_names: print('新名称在同路径下冲突') return -1 else: folder_obj.folder_name", "result return None def renameFolder(old_name: str, new_name: str, root: Folder): \"\"\" 重命名文件夹 :param", "parent_folder.child_nodes: if str(node) == name and isinstance(node, UserFile): # 同路径重名 return -1 new_file", ":param root: 文件系统根节点 :param old_name:旧名称 :param new_name:新名称 :return:0表示找不到文件,-1表示新名字重名,1表示改名成功 \"\"\" file_obj = findObjByName(old_name, root)", "kernal import Tool class FileOperation(Enum): Read = 0 Write = 1 Create =", "\"\"\" for i in range(target_file.disk_position, target_file.disk_position + target_file.size): Disk[i] = -1 def findFileById(file_id:", "file_table: if f.id == file_id: return f return -1 def findObjByName(name: str, parent_node):", "文件系统根节点 :param old_name:旧名称 :param new_name:新名称 :return:0表示找不到文件夹,-1表示新名字重名,1表示改名成功 \"\"\" folder_obj = findObjByName(old_name, root) if folder_obj", ":return:0表示找不到文件,-1表示新名字重名,1表示改名成功 \"\"\" file_obj = findObjByName(old_name, root) if file_obj is None: print('找不到该文件') # TODO:异常处理", "isinstance(target_file, UserFile) if target_file.authority == FileAuthority.ReadOnly: print('文件权限不足') # TODO:异常处理 return -1 else: clearFileInDisk(target_file,", "findObjByName(old_name, root) if folder_obj is None: print('找不到该文件夹') # TODO:异常处理 return 0 child_node_names =", "math.ceil(len(content) / 10) target_file.disk_position = contiguousAllocation(target_file, Disk) return 1 def readFile(file_name: str, root:", "enum import Enum from kernal import Tool class FileOperation(Enum): Read = 0 Write", "return new_folder else: for node in parent_folder.child_nodes: if str(node) == name and isinstance(node,", "文件表 :param is_folder:是否是文件夹 :param name:文件/文件夹名称 :param parent_folder:父文件夹对象 :param child_nodes:文件夹内容 :param data:文件数据 :return:文件/文件夹对象。若同路径存在重名返回-1,磁盘分配错误返回-2 \"\"\"", "elif IR[\"operator\"] == \"createFolder\": return creatFileOrFolder(True, path_node_list[i], parent_node, data=None, Disk=Disk, file_table=file_table) else: #", "权限不够 if target.authority == FileAuthority.ReadOnly: return -1 # 写数据 else: clearFileInDisk(target, Disk) target.data", "list, Disk: list, root: Folder): \"\"\" 通过路径找到文件/文件夹 :param root:文件系统根节点 :param Disk: 文件系统磁盘 :param", ":return:状态标志,文件根节点,文件系统磁盘,文件表 \"\"\" disk = [-1 for _ in range(DiskSize)] # 磁盘,存储文件的id f_table =", "parent_node:该参数用于递归,调用时必须传入root文件系统节点 :return:None表示没有该对象,否则返回文件系统对象 \"\"\" if not parent_node.child_nodes: return None child_node_names = list(map(str, parent_node.child_nodes)) if", "path:文件字符串 :return:文件/文件夹对象。若查找错误,返回0 \"\"\" path = path.replace(\" \", \"\") path_node_list = path.split('/') if path_node_list[0]", "Folder(name, parent_folder, child_nodes) if not name == 'root': parent_folder.child_nodes.append(new_folder) return new_folder else: for", "name and isinstance(node, UserFile): # 同路径重名 return -1 new_file = UserFile(name, parent_folder, data)", "file_table.append(new_file) new_file.disk_position = contiguousAllocation(new_file, Disk) if new_file.disk_position == -1: print('磁盘空间分配错误') # TODO:异常处理 return", "Disk: list): \"\"\" 写文件内容(原先内容会删除) :param Disk: 文件系统磁盘 :param root: 文件系统根节点 :param file_name:文件名 :param", "文件表 :param file_id:文件标识符 
:return:文件对象。若返回-1表明没有找到对应标识符的文件 \"\"\" for f in file_table: if f.id == file_id:", "not None: return result return None def renameFolder(old_name: str, new_name: str, root: Folder):", "\"\"\" if child_nodes is None: child_nodes = [] if is_folder: if parent_folder is", "\"\"\" 重命名文件夹 :param root: 文件系统根节点 :param old_name:旧名称 :param new_name:新名称 :return:0表示找不到文件夹,-1表示新名字重名,1表示改名成功 \"\"\" folder_obj =", "eg1. yes 有三个字符,占三个字节,除以10向上取整,因此该文件占据的磁盘空间为1 eg2. I am sure I am very handsome. 有29个字符(标点空格都算),因此该文件占据的磁盘空间为3 \"\"\"", "= list(map(str, file_obj.parent_node.child_nodes)) if new_name in child_node_names: print('新名称在同路径下冲突') return -1 else: file_obj.file_name =", "int = 1000, state: bool = False): \"\"\" 文件系统初始化 :param DiskSize:文件系统磁盘大小 :param state:状态标志", "target_file:欲删除的文件 \"\"\" for i in range(target_file.disk_position, target_file.disk_position + target_file.size): Disk[i] = -1 def", "assert isinstance(target_file, UserFile) if target_file.authority == FileAuthority.WriteOnly: print('文件权限不足') # TODO:异常处理 return -1 else:", "assert isinstance(target_file, UserFile) if target_file.authority == FileAuthority.ReadOnly: print('文件权限不足') # TODO:异常处理 return -1 else:", "-1 def writeDiskToTXT(): # TODO:把结果输出到TXT? pass def creatFileOrFolder(is_folder: bool, name: str, parent_folder: Folder,", "if IR[\"newName\"] in child_node_names: print('新名称在同路径下冲突') return -1 else: target.file_name = IR[\"newName\"] return 1", "path_now = target_folder.folder_name parent_node = target_folder.parent_node path = getPath(True, target_folder=parent_node) + '/' +", ":return:返回文件数据字符串,若返回0表明文件不存在,返回-1表明文件权限不足 \"\"\" target_file = findObjByName(file_name, root) if target_file is None or isinstance(target_file, Folder):", "IR[\"newName\"] in child_node_names: print('新名称在同路径下冲突') return -1 else: target.folder_name = IR[\"newName\"] return 1 elif", "str, parent_node): \"\"\" 利用递归,查找除了root文件夹以外的文件系统对象 :param name:文件/文件夹名称 :param parent_node:该参数用于递归,调用时必须传入root文件系统节点 :return:None表示没有该对象,否则返回文件系统对象 \"\"\" if not parent_node.child_nodes:", "return path \"\"\" 路径的格式为: /root/aaa/w 以上路径表示root文件夹下的aaa文件夹的名为w的文件/文件夹 \"\"\" def pathToObj(path: str, IR: dict, file_table:", ":param child_nodes:子节点。可能有多个,且可能是文件夹,也可能是文件 \"\"\" self.id = Tool.uniqueNum() self.folder_name = folder_name self.parent_node = parent_folder self.child_nodes", "name in child_node_names: return parent_node.child_nodes[child_node_names.index(name)] else: for child_node in parent_node.child_nodes: if isinstance(child_node, Folder):", "in range(len(Disk)): # 主要思想是,找到磁盘中连续且符合文件大小的几个块,且从磁盘头部遍历查找,这样有利于减少外部碎片 if Disk[i] == -1: if start_index == -1: start_index", "== \"writeFile\": # 权限不够 if target.authority == FileAuthority.ReadOnly: return -1 # 写数据 else:", "1 WriteOnly = 2 class Folder: def __init__(self, folder_name: str, parent_folder, child_nodes: list):", "authority def __str__(self): return self.file_name def contiguousAllocation(file_to_allocated: UserFile, Disk: list): \"\"\" 磁盘文件连续分配 :param", "name: str, parent_folder: Folder, file_table: list, Disk: list, data, child_nodes=None): \"\"\" 创建文件或文件夹 :param", "10) target_file.disk_position = contiguousAllocation(target_file, Disk) return 1 def readFile(file_name: str, root: Folder): \"\"\"", "parent_node.child_nodes)) else: return 0 def clearFileInDisk(target_file: UserFile, Disk: list): \"\"\" 在物理磁盘中删除文件信息 :param Disk:", "self.size = math.ceil(len(data) / 10) \"\"\" size是文件占据的磁盘空间,为了方便前端表示,单位为10字节(即磁盘一个块有10字节),一个英文字符一个字节,注意每次更改file内容时应当要更新该值 eg1. yes 有三个字符,占三个字节,除以10向上取整,因此该文件占据的磁盘空间为1 eg2. 
I am", "test', Disk=disk, file_table=f_table) root_node.child_nodes = [default_folder_1, default_folder_2, default_folder_3] return state, root_node, disk, f_table", "IR[\"content\"] target.size = math.ceil(len(IR[\"content\"]) / 10) target.disk_position = contiguousAllocation(target, Disk) return 1 elif", "UserFile(name, parent_folder, data) file_table.append(new_file) new_file.disk_position = contiguousAllocation(new_file, Disk) if new_file.disk_position == -1: print('磁盘空间分配错误')", "parent_folder:父节点。所有父节点一定是文件夹,注意,根节点(根文件夹)没有父节点,该属性为None :param child_nodes:子节点。可能有多个,且可能是文件夹,也可能是文件 \"\"\" self.id = Tool.uniqueNum() self.folder_name = folder_name self.parent_node = parent_folder", "root: 文件系统根节点 :param old_name:旧名称 :param new_name:新名称 :return:0表示找不到文件夹,-1表示新名字重名,1表示改名成功 \"\"\" folder_obj = findObjByName(old_name, root) if", "'root': return 0 # 从root出发 parent_node = root # 每次都会更新子节点们 child_node_names = list(map(str,", "if name in child_node_names: return parent_node.child_nodes[child_node_names.index(name)] else: for child_node in parent_node.child_nodes: if isinstance(child_node,", "self.authority = authority def __str__(self): return self.file_name def contiguousAllocation(file_to_allocated: UserFile, Disk: list): \"\"\"", "target_file.data = content target_file.size = math.ceil(len(content) / 10) target_file.disk_position = contiguousAllocation(target_file, Disk) return", "parent_node.child_nodes[child_node_names.index(path_node_list[i])] child_node_names = list(map(str, parent_node.child_nodes)) else: return 0 def clearFileInDisk(target_file: UserFile, Disk: list):", "\"\"\" size是文件占据的磁盘空间,为了方便前端表示,单位为10字节(即磁盘一个块有10字节),一个英文字符一个字节,注意每次更改file内容时应当要更新该值 eg1. yes 有三个字符,占三个字节,除以10向上取整,因此该文件占据的磁盘空间为1 eg2. I am sure I am very handsome.", "math.ceil(len(IR[\"content\"]) / 10) target.disk_position = contiguousAllocation(target, Disk) return 1 elif IR[\"operator\"] == \"delFile\":", "目录逻辑结构:树形 目录物理结构:连续形 TODO:磁盘外部碎片如何处理? 
TODO:磁盘IO添加中断 \"\"\" import math from enum import Enum from kernal", "\"\"\" file_obj = findObjByName(old_name, root) if file_obj is None: print('找不到该文件') # TODO:异常处理 return", "else: file_obj.file_name = new_name return 1 def writeFile(file_name: str, content: str, root: Folder,", "return -1 # 读数据 else: return target.data # 写文件 elif IR[\"operator\"] == \"writeFile\":", "def redirectFile(file_name: str, target_folder_name: str, root: Folder): \"\"\" 在不删除文件的情况下重定向文件路径,可以和其他操作组合实现复制和剪切等操作 :param root: 文件系统根节点 :param", "target_folder.child_nodes.append(target_file) return 1 def initFileSystem(DiskSize: int = 1000, state: bool = False): \"\"\"", "if target.authority == FileAuthority.ReadOnly: return -1 # 写数据 else: clearFileInDisk(target, Disk) target.data =", "def __str__(self): return self.file_name def contiguousAllocation(file_to_allocated: UserFile, Disk: list): \"\"\" 磁盘文件连续分配 :param Disk:", "data=None, Disk=disk, file_table=f_table) default_folder_2 = creatFileOrFolder(True, 'default_folder_2', root_node, data=None, Disk=disk, file_table=f_table) default_folder_3 =", "None child_node_names = list(map(str, parent_node.child_nodes)) if name in child_node_names: return parent_node.child_nodes[child_node_names.index(name)] else: for", ":param content:新内容 :return:返回1表示成功写入,返回0表示写入失败,返回-1表示文件没有写权限 \"\"\" target_file = findObjByName(file_name, root) if target_file is None or", ":param parent_folder:父节点。所有父节点一定是文件夹,注意,根节点(根文件夹)没有父节点,该属性为None :param child_nodes:子节点。可能有多个,且可能是文件夹,也可能是文件 \"\"\" self.id = Tool.uniqueNum() self.folder_name = folder_name self.parent_node =", "0 ReadOnly = 1 WriteOnly = 2 class Folder: def __init__(self, folder_name: str,", "isinstance(target_file, Folder): print('文件不存在') # TODO:异常处理 return 0 target_folder = findObjByName(target_folder_name, root) if target_folder", "if new_name in child_node_names: print('新名称在同路径下冲突') return -1 else: folder_obj.folder_name = new_name return 1", "return 1 elif IR[\"operator\"] == \"renameFolder\": if IR[\"newName\"] in child_node_names: print('新名称在同路径下冲突') return -1", "\"\"\" 重命名文件 :param root: 文件系统根节点 :param old_name:旧名称 :param new_name:新名称 :return:0表示找不到文件,-1表示新名字重名,1表示改名成功 \"\"\" file_obj =", "child_node_names: return parent_node.child_nodes[child_node_names.index(name)] else: for child_node in parent_node.child_nodes: if isinstance(child_node, Folder): result =", "assert isinstance(target_file, UserFile) assert isinstance(target_folder, Folder) target_file.parent_node.child_nodes.remove(target_file) target_file.parent_node = target_folder target_folder.child_nodes.append(target_file) return 1", "range(len(Disk)): # 主要思想是,找到磁盘中连续且符合文件大小的几个块,且从磁盘头部遍历查找,这样有利于减少外部碎片 if Disk[i] == -1: if start_index == -1: start_index =", "f_table = [] # 文件表,存储所有已经建立的文件 root_node = creatFileOrFolder(True, 'root', None, data=None, Disk=disk, file_table=f_table)", "else: path_now = target_folder.folder_name parent_node = target_folder.parent_node path = getPath(True, target_folder=parent_node) + '/'", "list(map(str, parent_node.child_nodes)) else: return 0 def clearFileInDisk(target_file: UserFile, Disk: list): \"\"\" 在物理磁盘中删除文件信息 :param", "state: state = True default_folder_1 = creatFileOrFolder(True, 'default_folder_1', root_node, data=None, Disk=disk, file_table=f_table) default_folder_2", "= [default_folder_1, default_folder_2, default_folder_3] return state, root_node, disk, f_table def FileTree(parent_node): # 是目录", "def writeDiskToTXT(): # TODO:把结果输出到TXT? 
pass def creatFileOrFolder(is_folder: bool, name: str, parent_folder: Folder, file_table:", "str, IR: dict, file_table: list, Disk: list, root: Folder): \"\"\" 通过路径找到文件/文件夹 :param root:文件系统根节点", "Disk=Disk, file_table=file_table) elif IR[\"operator\"] == \"createFolder\": return creatFileOrFolder(True, path_node_list[i], parent_node, data=None, Disk=Disk, file_table=file_table)", "return 1 def writeFile(file_name: str, content: str, root: Folder, Disk: list): \"\"\" 写文件内容(原先内容会删除)", "文件系统磁盘 :param root: 文件系统根节点 :param file_name:文件名 :param content:新内容 :return:返回1表示成功写入,返回0表示写入失败,返回-1表示文件没有写权限 \"\"\" target_file = findObjByName(file_name,", "root_node, data=None, Disk=disk, file_table=f_table) default_folder_3 = creatFileOrFolder(True, 'default_folder_3', root_node, data=None, Disk=disk, file_table=f_table) creatFileOrFolder(False,", "if target_file.authority == FileAuthority.WriteOnly: print('文件权限不足') # TODO:异常处理 return -1 else: return target_file.data def", "folder_name self.parent_node = parent_folder self.child_nodes = child_nodes def __str__(self): return self.folder_name class UserFile:", "name == 'root': parent_folder.child_nodes.append(new_folder) return new_folder else: for node in parent_folder.child_nodes: if str(node)", "= target_file.file_name parent_node = target_file.parent_node else: path_now = target_folder.folder_name parent_node = target_folder.parent_node path", "文件系统磁盘 :param file_to_allocated:需要分配磁盘空间的文件 :return:返回文件所存储的磁盘位置(起始下标),若返回-1说明磁盘空间不足 \"\"\" start_index = -1 space_counter = 0 for i", "DiskSize:文件系统磁盘大小 :param state:状态标志 :return:状态标志,文件根节点,文件系统磁盘,文件表 \"\"\" disk = [-1 for _ in range(DiskSize)] #", "= [] # 文件表,存储所有已经建立的文件 root_node = creatFileOrFolder(True, 'root', None, data=None, Disk=disk, file_table=f_table) if", "result = findObjByName(name, child_node) if result is not None: return result return None", "parent_node = target_folder.parent_node path = getPath(True, target_folder=parent_node) + '/' + path_now return path", "文件系统根节点 :param file_table: 文件表 :param file_name:文件名 :return:返回0表示无法找到对应文件,返回1表明删除成功 \"\"\" target_file = findObjByName(file_name, root) if", "root) if target_folder is None or isinstance(target_folder, UserFile): print('文件夹不存在') # TODO:异常处理 return 0", "= 1000, state: bool = False): \"\"\" 文件系统初始化 :param DiskSize:文件系统磁盘大小 :param state:状态标志 :return:状态标志,文件根节点,文件系统磁盘,文件表", "从root出发 parent_node = root # 每次都会更新子节点们 child_node_names = list(map(str, parent_node.child_nodes)) for i in", "new_name: str, root: Folder): \"\"\" 重命名文件夹 :param root: 文件系统根节点 :param old_name:旧名称 :param new_name:新名称", "self.parent_node = parent_folder self.child_nodes = child_nodes def __str__(self): return self.folder_name class UserFile: def", "5 class FileAuthority(Enum): Default = 0 ReadOnly = 1 WriteOnly = 2 class", "= contiguousAllocation(new_file, Disk) if new_file.disk_position == -1: print('磁盘空间分配错误') # TODO:异常处理 return -2 parent_folder.child_nodes.append(new_file)", "None: return result return None def renameFolder(old_name: str, new_name: str, root: Folder): \"\"\"", "return result return None def renameFolder(old_name: str, new_name: str, root: Folder): \"\"\" 重命名文件夹", "Tool class FileOperation(Enum): Read = 0 Write = 1 Create = 2 Rename", "clearFileInDisk(target, Disk) target.data = IR[\"content\"] target.size = math.ceil(len(IR[\"content\"]) / 10) target.disk_position = contiguousAllocation(target,", ":param file_name:欲重定向的文件名称 :param target_folder_name:欲重定向的目标文件夹 :return:返回0表示无法找到对应文件,返回1表明重定向成功 \"\"\" target_file = findObjByName(file_name, root) if 
target_file is", ":param file_name:文件名称 :return:返回文件数据字符串,若返回0表明文件不存在,返回-1表明文件权限不足 \"\"\" target_file = findObjByName(file_name, root) if target_file is None or", "child_node_names: parent_node = parent_node.child_nodes[child_node_names.index(path_node_list[i])] child_node_names = list(map(str, parent_node.child_nodes)) else: return 0 def clearFileInDisk(target_file:", "if file_obj is None: print('找不到该文件') # TODO:异常处理 return 0 child_node_names = list(map(str, file_obj.parent_node.child_nodes))", "target_file:目标文件 :return:目标对象的路径 \"\"\" if is_folder and target_folder.folder_name == 'root': return '/root' if not", ":param data:文件数据 :param authority:文件权限 \"\"\" self.id = Tool.uniqueNum() self.file_name = file_name self.parent_node =", "None: child_nodes = [] if is_folder: if parent_folder is not None: for node", "data=None, Disk=disk, file_table=f_table) if not state: state = True default_folder_1 = creatFileOrFolder(True, 'default_folder_1',", "start_index = i space_counter += 1 if space_counter >= file_to_allocated.size: for j in", "\"\"\" 在不删除文件的情况下重定向文件路径,可以和其他操作组合实现复制和剪切等操作 :param root: 文件系统根节点 :param file_name:欲重定向的文件名称 :param target_folder_name:欲重定向的目标文件夹 :return:返回0表示无法找到对应文件,返回1表明重定向成功 \"\"\" target_file =", "0 for i in range(len(Disk)): # 主要思想是,找到磁盘中连续且符合文件大小的几个块,且从磁盘头部遍历查找,这样有利于减少外部碎片 if Disk[i] == -1: if start_index", "target_file.parent_node = target_folder target_folder.child_nodes.append(target_file) return 1 def initFileSystem(DiskSize: int = 1000, state: bool", "from kernal import Tool class FileOperation(Enum): Read = 0 Write = 1 Create", "def initFileSystem(DiskSize: int = 1000, state: bool = False): \"\"\" 文件系统初始化 :param DiskSize:文件系统磁盘大小", "clearFileInDisk(target_file, Disk) file_table.remove(target_file) target_file.parent_node.child_nodes.remove(target_file) return 1 def redirectFile(file_name: str, target_folder_name: str, root: Folder):", "file_table=f_table) root_node.child_nodes = [default_folder_1, default_folder_2, default_folder_3] return state, root_node, disk, f_table def FileTree(parent_node):", "target_file.file_name parent_node = target_file.parent_node else: path_now = target_folder.folder_name parent_node = target_folder.parent_node path =", "Disk) target_file.data = content target_file.size = math.ceil(len(content) / 10) target_file.disk_position = contiguousAllocation(target_file, Disk)", "file_name:文件名 :return:返回0表示无法找到对应文件,返回1表明删除成功 \"\"\" target_file = findObjByName(file_name, root) if target_file is None or isinstance(target_file,", "bool, target_folder: Folder = None, target_file: UserFile = None): \"\"\" 利用递归获取文件/文件夹的路径 :param is_folder:欲获取路径的对象是否是文件夹", ":return:None表示没有该对象,否则返回文件系统对象 \"\"\" if not parent_node.child_nodes: return None child_node_names = list(map(str, parent_node.child_nodes)) if name", "UserFile) if target_file.authority == FileAuthority.WriteOnly: print('文件权限不足') # TODO:异常处理 return -1 else: return target_file.data", "target_file.authority == FileAuthority.ReadOnly: print('文件权限不足') # TODO:异常处理 return -1 else: clearFileInDisk(target_file, Disk) target_file.data =", "def findFileById(file_id: int, file_table: list): \"\"\" 通过文件id返回文件对象 该函数常用于磁盘索引文件,因为在本项目中磁盘仅需读取文件标识符(id)即可找到文件对象 :param file_table: 文件表 :param file_id:文件标识符", "root: Folder): \"\"\" 重命名文件 :param root: 文件系统根节点 :param old_name:旧名称 :param new_name:新名称 :return:0表示找不到文件,-1表示新名字重名,1表示改名成功 \"\"\"", "class UserFile: def __init__(self, file_name: str, parent_folder, data, authority: FileAuthority = FileAuthority.Default): \"\"\"", "in child_node_names: print('新名称在同路径下冲突') return 
-1 else: target.folder_name = IR[\"newName\"] return 1 elif path_node_list[i]", "target.file_name = IR[\"newName\"] return 1 elif IR[\"operator\"] == \"renameFolder\": if IR[\"newName\"] in child_node_names:", "0 return -1 def writeDiskToTXT(): # TODO:把结果输出到TXT? pass def creatFileOrFolder(is_folder: bool, name: str,", "Disk) file_table.remove(target_file) target_file.parent_node.child_nodes.remove(target_file) return 1 def redirectFile(file_name: str, target_folder_name: str, root: Folder): \"\"\"", "str, parent_folder, child_nodes: list): \"\"\" 文件夹数据结构,请注意,文件夹为逻辑结构,因此不会占用物理磁盘空间 :param folder_name:文件夹名 :param parent_folder:父节点。所有父节点一定是文件夹,注意,根节点(根文件夹)没有父节点,该属性为None :param child_nodes:子节点。可能有多个,且可能是文件夹,也可能是文件 \"\"\"", "\"\"\" start_index = -1 space_counter = 0 for i in range(len(Disk)): # 主要思想是,找到磁盘中连续且符合文件大小的几个块,且从磁盘头部遍历查找,这样有利于减少外部碎片", "False): \"\"\" 文件系统初始化 :param DiskSize:文件系统磁盘大小 :param state:状态标志 :return:状态标志,文件根节点,文件系统磁盘,文件表 \"\"\" disk = [-1 for", "文件在磁盘中的位置 self.authority = authority def __str__(self): return self.file_name def contiguousAllocation(file_to_allocated: UserFile, Disk: list):", "data self.size = math.ceil(len(data) / 10) \"\"\" size是文件占据的磁盘空间,为了方便前端表示,单位为10字节(即磁盘一个块有10字节),一个英文字符一个字节,注意每次更改file内容时应当要更新该值 eg1. yes 有三个字符,占三个字节,除以10向上取整,因此该文件占据的磁盘空间为1 eg2. I", "if target_file.authority == FileAuthority.ReadOnly: print('文件权限不足') # TODO:异常处理 return -1 else: clearFileInDisk(target_file, Disk) target_file.data", "for _ in range(DiskSize)] # 磁盘,存储文件的id f_table = [] # 文件表,存储所有已经建立的文件 root_node =", "disk, f_table def FileTree(parent_node): # 是目录 if isinstance(parent_node, Folder): data = [] child_nodes", "str, content: str, root: Folder, Disk: list): \"\"\" 写文件内容(原先内容会删除) :param Disk: 文件系统磁盘 :param", "import Tool class FileOperation(Enum): Read = 0 Write = 1 Create = 2", "/ 10) target.disk_position = contiguousAllocation(target, Disk) return 1 elif IR[\"operator\"] == \"delFile\": if", "parent_folder is not None: for node in parent_folder.child_nodes: if str(node) == name and", "\"\"\" 在物理磁盘中删除文件信息 :param Disk: 文件系统磁盘 :param target_file:欲删除的文件 \"\"\" for i in range(target_file.disk_position, target_file.disk_position", "2 class Folder: def __init__(self, folder_name: str, parent_folder, child_nodes: list): \"\"\" 文件夹数据结构,请注意,文件夹为逻辑结构,因此不会占用物理磁盘空间 :param", "读文件 if IR[\"operator\"] == \"readFile\": # 权限不够 if target.authority == FileAuthority.WriteOnly: return -1", "new_name: str, root: Folder): \"\"\" 重命名文件 :param root: 文件系统根节点 :param old_name:旧名称 :param new_name:新名称", "Disk) return 1 elif IR[\"operator\"] == \"delFile\": if isinstance(target, Folder): return 0 else:", "self.disk_position = -1 # 文件在磁盘中的位置 self.authority = authority def __str__(self): return self.file_name def", "= list(map(str, parent_node.child_nodes)) for i in range(1, len(path_node_list)): if i == len(path_node_list) -", "data=IR['content'], Disk=Disk, file_table=file_table) elif IR[\"operator\"] == \"createFolder\": return creatFileOrFolder(True, path_node_list[i], parent_node, data=None, Disk=Disk,", "0 child_node_names = list(map(str, file_obj.parent_node.child_nodes)) if new_name in child_node_names: print('新名称在同路径下冲突') return -1 else:", "elif IR[\"operator\"] == \"renameFolder\": if IR[\"newName\"] in child_node_names: print('新名称在同路径下冲突') return -1 else: target.folder_name", "target.size = math.ceil(len(IR[\"content\"]) / 10) target.disk_position = contiguousAllocation(target, Disk) return 1 elif IR[\"operator\"]", "target_folder: Folder = None, target_file: UserFile = None): \"\"\" 利用递归获取文件/文件夹的路径 
:param is_folder:欲获取路径的对象是否是文件夹 :param", "file for test', Disk=disk, file_table=f_table) root_node.child_nodes = [default_folder_1, default_folder_2, default_folder_3] return state, root_node,", "0 # 从root出发 parent_node = root # 每次都会更新子节点们 child_node_names = list(map(str, parent_node.child_nodes)) for", "list): \"\"\" 磁盘文件连续分配 :param Disk: 文件系统磁盘 :param file_to_allocated:需要分配磁盘空间的文件 :return:返回文件所存储的磁盘位置(起始下标),若返回-1说明磁盘空间不足 \"\"\" start_index = -1", "单纯的查询文件目录树 if IR is None: return parent_node.child_nodes[child_node_names.index(path_node_list[i])] elif IR[\"operator\"] == \"createFile\": return creatFileOrFolder(False,", "return 1 def renameFile(old_name: str, new_name: str, root: Folder): \"\"\" 重命名文件 :param root:", "authority:文件权限 \"\"\" self.id = Tool.uniqueNum() self.file_name = file_name self.parent_node = parent_folder self.data =", "if IR is None: return parent_node.child_nodes[child_node_names.index(path_node_list[i])] elif IR[\"operator\"] == \"createFile\": return creatFileOrFolder(False, path_node_list[i],", "parent_folder.child_nodes.append(new_file) return new_file def getPath(is_folder: bool, target_folder: Folder = None, target_file: UserFile =", "== \"renameFile\": if IR[\"newName\"] in child_node_names: print('新名称在同路径下冲突') return -1 else: target.file_name = IR[\"newName\"]", "list, root: Folder, Disk: list): \"\"\" 彻底删除文件,包括磁盘和文件表的记录 :param Disk: 文件系统磁盘 :param root: 文件系统根节点", "None def renameFolder(old_name: str, new_name: str, root: Folder): \"\"\" 重命名文件夹 :param root: 文件系统根节点", "folder_name: str, parent_folder, child_nodes: list): \"\"\" 文件夹数据结构,请注意,文件夹为逻辑结构,因此不会占用物理磁盘空间 :param folder_name:文件夹名 :param parent_folder:父节点。所有父节点一定是文件夹,注意,根节点(根文件夹)没有父节点,该属性为None :param child_nodes:子节点。可能有多个,且可能是文件夹,也可能是文件", "Folder): \"\"\" 重命名文件 :param root: 文件系统根节点 :param old_name:旧名称 :param new_name:新名称 :return:0表示找不到文件,-1表示新名字重名,1表示改名成功 \"\"\" file_obj", "= findObjByName(target_folder_name, root) if target_folder is None or isinstance(target_folder, UserFile): print('文件夹不存在') # TODO:异常处理", "# TODO:异常处理 return 0 assert isinstance(target_file, UserFile) if target_file.authority == FileAuthority.WriteOnly: print('文件权限不足') #", "Disk: 文件系统磁盘 :param root: 文件系统根节点 :param file_table: 文件表 :param file_name:文件名 :return:返回0表示无法找到对应文件,返回1表明删除成功 \"\"\" target_file", "return state, root_node, disk, f_table def FileTree(parent_node): # 是目录 if isinstance(parent_node, Folder): data", "findObjByName(name, child_node) if result is not None: return result return None def renameFolder(old_name:", "# 磁盘,存储文件的id f_table = [] # 文件表,存储所有已经建立的文件 root_node = creatFileOrFolder(True, 'root', None, data=None,", "4 Redirect = 5 class FileAuthority(Enum): Default = 0 ReadOnly = 1 WriteOnly", "new_folder else: for node in parent_folder.child_nodes: if str(node) == name and isinstance(node, UserFile):", "getPath(True, target_folder=parent_node) + '/' + path_now return path \"\"\" 路径的格式为: /root/aaa/w 以上路径表示root文件夹下的aaa文件夹的名为w的文件/文件夹 \"\"\"", "root: Folder): \"\"\" 通过路径找到文件/文件夹 :param root:文件系统根节点 :param Disk: 文件系统磁盘 :param file_table: 文件表 :param", "file_id: return f return -1 def findObjByName(name: str, parent_node): \"\"\" 利用递归,查找除了root文件夹以外的文件系统对象 :param name:文件/文件夹名称", "I am sure I am very handsome. 
有29个字符(标点空格都算),因此该文件占据的磁盘空间为3 \"\"\" self.disk_position = -1 #", "= contiguousAllocation(target_file, Disk) return 1 def readFile(file_name: str, root: Folder): \"\"\" 读取文件 :param", "# 权限不够 if target.authority == FileAuthority.ReadOnly: return -1 # 写数据 else: clearFileInDisk(target, Disk)", "return 0 else: clearFileInDisk(target, Disk) file_table.remove(target) target.parent_node.child_nodes.remove(target) return 1 elif IR[\"operator\"] == \"renameFile\":", "\"\"\" 读取文件 :param root: 文件系统根节点 :param file_name:文件名称 :return:返回文件数据字符串,若返回0表明文件不存在,返回-1表明文件权限不足 \"\"\" target_file = findObjByName(file_name, root)", "is a file for test', Disk=disk, file_table=f_table) root_node.child_nodes = [default_folder_1, default_folder_2, default_folder_3] return", "isinstance(target_file, Folder): print('文件不存在') # TODO:异常处理 return 0 assert isinstance(target_file, UserFile) clearFileInDisk(target_file, Disk) file_table.remove(target_file)", ":param DiskSize:文件系统磁盘大小 :param state:状态标志 :return:状态标志,文件根节点,文件系统磁盘,文件表 \"\"\" disk = [-1 for _ in range(DiskSize)]", "path_now return path \"\"\" 路径的格式为: /root/aaa/w 以上路径表示root文件夹下的aaa文件夹的名为w的文件/文件夹 \"\"\" def pathToObj(path: str, IR: dict,", "start_index = -1 space_counter = 0 return -1 def writeDiskToTXT(): # TODO:把结果输出到TXT? pass", "root: Folder): \"\"\" 读取文件 :param root: 文件系统根节点 :param file_name:文件名称 :return:返回文件数据字符串,若返回0表明文件不存在,返回-1表明文件权限不足 \"\"\" target_file =", "Default = 0 ReadOnly = 1 WriteOnly = 2 class Folder: def __init__(self,", "creatFileOrFolder(False, 'test', default_folder_1, data='This is a file for test', Disk=disk, file_table=f_table) root_node.child_nodes =", "target_folder.parent_node path = getPath(True, target_folder=parent_node) + '/' + path_now return path \"\"\" 路径的格式为:", "Folder): return 0 else: clearFileInDisk(target, Disk) file_table.remove(target) target.parent_node.child_nodes.remove(target) return 1 elif IR[\"operator\"] ==", "None or isinstance(target_file, Folder): print('文件不存在') # TODO:异常处理 return 0 assert isinstance(target_file, UserFile) clearFileInDisk(target_file,", "== name and isinstance(node, UserFile): # 同路径重名 return -1 new_file = UserFile(name, parent_folder,", "= Folder(name, parent_folder, child_nodes) if not name == 'root': parent_folder.child_nodes.append(new_folder) return new_folder else:", "return creatFileOrFolder(False, path_node_list[i], parent_node, data=IR['content'], Disk=Disk, file_table=file_table) elif IR[\"operator\"] == \"createFolder\": return creatFileOrFolder(True,", "print('找不到该文件') # TODO:异常处理 return 0 child_node_names = list(map(str, file_obj.parent_node.child_nodes)) if new_name in child_node_names:", "def __init__(self, file_name: str, parent_folder, data, authority: FileAuthority = FileAuthority.Default): \"\"\" 文件数据结构 :param", ":return:0表示找不到文件夹,-1表示新名字重名,1表示改名成功 \"\"\" folder_obj = findObjByName(old_name, root) if folder_obj is None: print('找不到该文件夹') # TODO:异常处理", "target_file.disk_position + target_file.size): Disk[i] = -1 def findFileById(file_id: int, file_table: list): \"\"\" 通过文件id返回文件对象", "return parent_node.child_nodes[child_node_names.index(name)] else: for child_node in parent_node.child_nodes: if isinstance(child_node, Folder): result = findObjByName(name,", ":param file_name:文件名 :param parent_folder:父节点文件夹,每个文件该属性必须有值 :param data:文件数据 :param authority:文件权限 \"\"\" self.id = Tool.uniqueNum() self.file_name", "target_folder=parent_node) + '/' + path_now return path \"\"\" 路径的格式为: /root/aaa/w 以上路径表示root文件夹下的aaa文件夹的名为w的文件/文件夹 \"\"\" def", "Delete = 4 Redirect = 5 class FileAuthority(Enum): 
Default = 0 ReadOnly =", "folder_name:文件夹名 :param parent_folder:父节点。所有父节点一定是文件夹,注意,根节点(根文件夹)没有父节点,该属性为None :param child_nodes:子节点。可能有多个,且可能是文件夹,也可能是文件 \"\"\" self.id = Tool.uniqueNum() self.folder_name = folder_name self.parent_node", "= path.replace(\" \", \"\") path_node_list = path.split('/') if path_node_list[0] == \"\": path_node_list =", "== file_id: return f return -1 def findObjByName(name: str, parent_node): \"\"\" 利用递归,查找除了root文件夹以外的文件系统对象 :param", "IR[\"newName\"] in child_node_names: print('新名称在同路径下冲突') return -1 else: target.file_name = IR[\"newName\"] return 1 elif", "if start_index == -1: start_index = i space_counter += 1 if space_counter >=", "== -1: print('磁盘空间分配错误') # TODO:异常处理 return -2 parent_folder.child_nodes.append(new_file) return new_file def getPath(is_folder: bool,", "import Enum from kernal import Tool class FileOperation(Enum): Read = 0 Write =", "target.data # 写文件 elif IR[\"operator\"] == \"writeFile\": # 权限不够 if target.authority == FileAuthority.ReadOnly:", "file_name:文件名 :param parent_folder:父节点文件夹,每个文件该属性必须有值 :param data:文件数据 :param authority:文件权限 \"\"\" self.id = Tool.uniqueNum() self.file_name =", "data, child_nodes=None): \"\"\" 创建文件或文件夹 :param Disk: 文件系统磁盘 :param file_table: 文件表 :param is_folder:是否是文件夹 :param", "Disk) return 1 def readFile(file_name: str, root: Folder): \"\"\" 读取文件 :param root: 文件系统根节点", "contiguousAllocation(new_file, Disk) if new_file.disk_position == -1: print('磁盘空间分配错误') # TODO:异常处理 return -2 parent_folder.child_nodes.append(new_file) return", "in range(target_file.disk_position, target_file.disk_position + target_file.size): Disk[i] = -1 def findFileById(file_id: int, file_table: list):", ":param old_name:旧名称 :param new_name:新名称 :return:0表示找不到文件夹,-1表示新名字重名,1表示改名成功 \"\"\" folder_obj = findObjByName(old_name, root) if folder_obj is", "== FileAuthority.WriteOnly: print('文件权限不足') # TODO:异常处理 return -1 else: return target_file.data def delFile(file_name: str,", "None or isinstance(target_file, Folder): print('文件不存在') # TODO:异常处理 return 0 assert isinstance(target_file, UserFile) if", "in parent_folder.child_nodes: if str(node) == name and isinstance(node, UserFile): # 同路径重名 return -1", "if target_folder is None or isinstance(target_folder, UserFile): print('文件夹不存在') # TODO:异常处理 return 0 assert", "= None, target_file: UserFile = None): \"\"\" 利用递归获取文件/文件夹的路径 :param is_folder:欲获取路径的对象是否是文件夹 :param target_folder:目标文件夹 :param", "target_file.authority == FileAuthority.WriteOnly: print('文件权限不足') # TODO:异常处理 return -1 else: return target_file.data def delFile(file_name:", "\"\"\" 文件数据结构 :param file_name:文件名 :param parent_folder:父节点文件夹,每个文件该属性必须有值 :param data:文件数据 :param authority:文件权限 \"\"\" self.id =", "# 是目录 if isinstance(parent_node, Folder): data = [] child_nodes = list(parent_node.child_nodes) for child", "parent_node.child_nodes: return None child_node_names = list(map(str, parent_node.child_nodes)) if name in child_node_names: return parent_node.child_nodes[child_node_names.index(name)]", "is None: return parent_node.child_nodes[child_node_names.index(path_node_list[i])] elif IR[\"operator\"] == \"createFile\": return creatFileOrFolder(False, path_node_list[i], parent_node, data=IR['content'],", "list(parent_node.child_nodes) for child in child_nodes: data.append(FileTree(child)) return {parent_node.__str__(): data} elif isinstance(parent_node, UserFile): return", "f_table def FileTree(parent_node): # 是目录 if isinstance(parent_node, Folder): data = [] child_nodes =", "creatFileOrFolder(is_folder: bool, name: str, parent_folder: Folder, 
file_table: list, Disk: list, data, child_nodes=None): \"\"\"", "= FileAuthority.Default): \"\"\" 文件数据结构 :param file_name:文件名 :param parent_folder:父节点文件夹,每个文件该属性必须有值 :param data:文件数据 :param authority:文件权限 \"\"\"", ":param target_file:目标文件 :return:目标对象的路径 \"\"\" if is_folder and target_folder.folder_name == 'root': return '/root' if", "= -1 def findFileById(file_id: int, file_table: list): \"\"\" 通过文件id返回文件对象 该函数常用于磁盘索引文件,因为在本项目中磁盘仅需读取文件标识符(id)即可找到文件对象 :param file_table: 文件表", "default_folder_3] return state, root_node, disk, f_table def FileTree(parent_node): # 是目录 if isinstance(parent_node, Folder):", "2 Rename = 3 Delete = 4 Redirect = 5 class FileAuthority(Enum): Default", "if str(node) == name and isinstance(node, UserFile): # 同路径重名 return -1 new_file =", "= math.ceil(len(data) / 10) \"\"\" size是文件占据的磁盘空间,为了方便前端表示,单位为10字节(即磁盘一个块有10字节),一个英文字符一个字节,注意每次更改file内容时应当要更新该值 eg1. yes 有三个字符,占三个字节,除以10向上取整,因此该文件占据的磁盘空间为1 eg2. I am sure", "isinstance(target_file, UserFile) clearFileInDisk(target_file, Disk) file_table.remove(target_file) target_file.parent_node.child_nodes.remove(target_file) return 1 def redirectFile(file_name: str, target_folder_name: str,", "+ file_to_allocated.size): Disk[j] = file_to_allocated.id return start_index else: start_index = -1 space_counter =", "FileAuthority.ReadOnly: return -1 # 写数据 else: clearFileInDisk(target, Disk) target.data = IR[\"content\"] target.size =", "path_node_list[1:] if len(path_node_list) < 1 or path_node_list[0] != 'root': return 0 # 从root出发", "= 0 Write = 1 Create = 2 Rename = 3 Delete =", "def delFile(file_name: str, file_table: list, root: Folder, Disk: list): \"\"\" 彻底删除文件,包括磁盘和文件表的记录 :param Disk:", "else: start_index = -1 space_counter = 0 return -1 def writeDiskToTXT(): # TODO:把结果输出到TXT?", "if not is_folder: path_now = target_file.file_name parent_node = target_file.parent_node else: path_now = target_folder.folder_name", "TODO:异常处理 return 0 child_node_names = list(map(str, folder_obj.parent_node.child_nodes)) if new_name in child_node_names: print('新名称在同路径下冲突') return", "start_index == -1: start_index = i space_counter += 1 if space_counter >= file_to_allocated.size:", "0 def clearFileInDisk(target_file: UserFile, Disk: list): \"\"\" 在物理磁盘中删除文件信息 :param Disk: 文件系统磁盘 :param target_file:欲删除的文件", "文件表 :param IR: 直接执行指令 :param path:文件字符串 :return:文件/文件夹对象。若查找错误,返回0 \"\"\" path = path.replace(\" \", \"\")", "pass def creatFileOrFolder(is_folder: bool, name: str, parent_folder: Folder, file_table: list, Disk: list, data,", "\"\"\" 通过路径找到文件/文件夹 :param root:文件系统根节点 :param Disk: 文件系统磁盘 :param file_table: 文件表 :param IR: 直接执行指令", "IR[\"newName\"] return 1 elif path_node_list[i] in child_node_names: parent_node = parent_node.child_nodes[child_node_names.index(path_node_list[i])] child_node_names = list(map(str,", "if not state: state = True default_folder_1 = creatFileOrFolder(True, 'default_folder_1', root_node, data=None, Disk=disk,", "data=None, Disk=Disk, file_table=file_table) else: # 不存在问题 if not path_node_list[i] in child_node_names: return 0", "is_folder: path_now = target_file.file_name parent_node = target_file.parent_node else: path_now = target_folder.folder_name parent_node =", "3 Delete = 4 Redirect = 5 class FileAuthority(Enum): Default = 0 ReadOnly", "__str__(self): return self.file_name def contiguousAllocation(file_to_allocated: UserFile, Disk: list): \"\"\" 磁盘文件连续分配 :param Disk: 文件系统磁盘", "= root # 每次都会更新子节点们 child_node_names = list(map(str, parent_node.child_nodes)) for i in range(1, len(path_node_list)):", "TODO:异常处理 return 0 
assert isinstance(target_file, UserFile) if target_file.authority == FileAuthority.ReadOnly: print('文件权限不足') # TODO:异常处理", "1 elif IR[\"operator\"] == \"delFile\": if isinstance(target, Folder): return 0 else: clearFileInDisk(target, Disk)", "return -1 def findObjByName(name: str, parent_node): \"\"\" 利用递归,查找除了root文件夹以外的文件系统对象 :param name:文件/文件夹名称 :param parent_node:该参数用于递归,调用时必须传入root文件系统节点 :return:None表示没有该对象,否则返回文件系统对象", "1 Create = 2 Rename = 3 Delete = 4 Redirect = 5", "UserFile) clearFileInDisk(target_file, Disk) file_table.remove(target_file) target_file.parent_node.child_nodes.remove(target_file) return 1 def redirectFile(file_name: str, target_folder_name: str, root:", "\"\"\" 路径的格式为: /root/aaa/w 以上路径表示root文件夹下的aaa文件夹的名为w的文件/文件夹 \"\"\" def pathToObj(path: str, IR: dict, file_table: list, Disk:", "= 3 Delete = 4 Redirect = 5 class FileAuthority(Enum): Default = 0", "'root': return '/root' if not is_folder: path_now = target_file.file_name parent_node = target_file.parent_node else:", "-1 # 读数据 else: return target.data # 写文件 elif IR[\"operator\"] == \"writeFile\": #", "renameFile(old_name: str, new_name: str, root: Folder): \"\"\" 重命名文件 :param root: 文件系统根节点 :param old_name:旧名称", "file_table=f_table) if not state: state = True default_folder_1 = creatFileOrFolder(True, 'default_folder_1', root_node, data=None,", "assert isinstance(target_file, UserFile) clearFileInDisk(target_file, Disk) file_table.remove(target_file) target_file.parent_node.child_nodes.remove(target_file) return 1 def redirectFile(file_name: str, target_folder_name:", "在物理磁盘中删除文件信息 :param Disk: 文件系统磁盘 :param target_file:欲删除的文件 \"\"\" for i in range(target_file.disk_position, target_file.disk_position +", "-1: if start_index == -1: start_index = i space_counter += 1 if space_counter", "name:文件/文件夹名称 :param parent_node:该参数用于递归,调用时必须传入root文件系统节点 :return:None表示没有该对象,否则返回文件系统对象 \"\"\" if not parent_node.child_nodes: return None child_node_names = list(map(str,", "'default_folder_2', root_node, data=None, Disk=disk, file_table=f_table) default_folder_3 = creatFileOrFolder(True, 'default_folder_3', root_node, data=None, Disk=disk, file_table=f_table)", "handsome. 
有29个字符(标点空格都算),因此该文件占据的磁盘空间为3 \"\"\" self.disk_position = -1 # 文件在磁盘中的位置 self.authority = authority def __str__(self):", "= creatFileOrFolder(True, 'default_folder_3', root_node, data=None, Disk=disk, file_table=f_table) creatFileOrFolder(False, 'test', default_folder_1, data='This is a", "利用递归获取文件/文件夹的路径 :param is_folder:欲获取路径的对象是否是文件夹 :param target_folder:目标文件夹 :param target_file:目标文件 :return:目标对象的路径 \"\"\" if is_folder and target_folder.folder_name", "= file_to_allocated.id return start_index else: start_index = -1 space_counter = 0 return -1", "[] if is_folder: if parent_folder is not None: for node in parent_folder.child_nodes: if", "target_folder:目标文件夹 :param target_file:目标文件 :return:目标对象的路径 \"\"\" if is_folder and target_folder.folder_name == 'root': return '/root'", ":return:目标对象的路径 \"\"\" if is_folder and target_folder.folder_name == 'root': return '/root' if not is_folder:", "- 1: # 单纯的查询文件目录树 if IR is None: return parent_node.child_nodes[child_node_names.index(path_node_list[i])] elif IR[\"operator\"] ==", "str, root: Folder, Disk: list): \"\"\" 写文件内容(原先内容会删除) :param Disk: 文件系统磁盘 :param root: 文件系统根节点", "1 def renameFile(old_name: str, new_name: str, root: Folder): \"\"\" 重命名文件 :param root: 文件系统根节点", "return -1 else: clearFileInDisk(target_file, Disk) target_file.data = content target_file.size = math.ceil(len(content) / 10)", "# TODO:异常处理 return -2 parent_folder.child_nodes.append(new_file) return new_file def getPath(is_folder: bool, target_folder: Folder =", "# TODO:异常处理 return 0 target_folder = findObjByName(target_folder_name, root) if target_folder is None or", "TODO:异常处理 return -1 else: return target_file.data def delFile(file_name: str, file_table: list, root: Folder,", "-1 else: file_obj.file_name = new_name return 1 def writeFile(file_name: str, content: str, root:", "# 文件在磁盘中的位置 self.authority = authority def __str__(self): return self.file_name def contiguousAllocation(file_to_allocated: UserFile, Disk:", "space_counter = 0 for i in range(len(Disk)): # 主要思想是,找到磁盘中连续且符合文件大小的几个块,且从磁盘头部遍历查找,这样有利于减少外部碎片 if Disk[i] == -1:", "\"\"\" self.id = Tool.uniqueNum() self.file_name = file_name self.parent_node = parent_folder self.data = data", "is not None: for node in parent_folder.child_nodes: if str(node) == name and isinstance(node,", "parent_folder self.child_nodes = child_nodes def __str__(self): return self.folder_name class UserFile: def __init__(self, file_name:", ":param file_table: 文件表 :param file_id:文件标识符 :return:文件对象。若返回-1表明没有找到对应标识符的文件 \"\"\" for f in file_table: if f.id", "elif IR[\"operator\"] == \"renameFile\": if IR[\"newName\"] in child_node_names: print('新名称在同路径下冲突') return -1 else: target.file_name", "# 读数据 else: return target.data # 写文件 elif IR[\"operator\"] == \"writeFile\": # 权限不够", "findObjByName(name: str, parent_node): \"\"\" 利用递归,查找除了root文件夹以外的文件系统对象 :param name:文件/文件夹名称 :param parent_node:该参数用于递归,调用时必须传入root文件系统节点 :return:None表示没有该对象,否则返回文件系统对象 \"\"\" if not", "str, root: Folder): \"\"\" 在不删除文件的情况下重定向文件路径,可以和其他操作组合实现复制和剪切等操作 :param root: 文件系统根节点 :param file_name:欲重定向的文件名称 :param target_folder_name:欲重定向的目标文件夹 :return:返回0表示无法找到对应文件,返回1表明重定向成功", ":param authority:文件权限 \"\"\" self.id = Tool.uniqueNum() self.file_name = file_name self.parent_node = parent_folder self.data", "-1 else: target.file_name = IR[\"newName\"] return 1 elif IR[\"operator\"] == \"renameFolder\": if IR[\"newName\"]", "root: 文件系统根节点 :param old_name:旧名称 :param new_name:新名称 :return:0表示找不到文件,-1表示新名字重名,1表示改名成功 \"\"\" file_obj = findObjByName(old_name, root) if", "\"renameFolder\": if 
IR[\"newName\"] in child_node_names: print('新名称在同路径下冲突') return -1 else: target.folder_name = IR[\"newName\"] return", "if isinstance(target, Folder): return 0 else: clearFileInDisk(target, Disk) file_table.remove(target) target.parent_node.child_nodes.remove(target) return 1 elif", "# TODO:异常处理 return -1 else: clearFileInDisk(target_file, Disk) target_file.data = content target_file.size = math.ceil(len(content)", ":param IR: 直接执行指令 :param path:文件字符串 :return:文件/文件夹对象。若查找错误,返回0 \"\"\" path = path.replace(\" \", \"\") path_node_list", "# 文件表,存储所有已经建立的文件 root_node = creatFileOrFolder(True, 'root', None, data=None, Disk=disk, file_table=f_table) if not state:", "yes 有三个字符,占三个字节,除以10向上取整,因此该文件占据的磁盘空间为1 eg2. I am sure I am very handsome. 有29个字符(标点空格都算),因此该文件占据的磁盘空间为3 \"\"\" self.disk_position", "= 0 return -1 def writeDiskToTXT(): # TODO:把结果输出到TXT? pass def creatFileOrFolder(is_folder: bool, name:", "Folder = None, target_file: UserFile = None): \"\"\" 利用递归获取文件/文件夹的路径 :param is_folder:欲获取路径的对象是否是文件夹 :param target_folder:目标文件夹", "# TODO:异常处理 return 0 assert isinstance(target_file, UserFile) assert isinstance(target_folder, Folder) target_file.parent_node.child_nodes.remove(target_file) target_file.parent_node =", "child_nodes: list): \"\"\" 文件夹数据结构,请注意,文件夹为逻辑结构,因此不会占用物理磁盘空间 :param folder_name:文件夹名 :param parent_folder:父节点。所有父节点一定是文件夹,注意,根节点(根文件夹)没有父节点,该属性为None :param child_nodes:子节点。可能有多个,且可能是文件夹,也可能是文件 \"\"\" self.id =", "root # 每次都会更新子节点们 child_node_names = list(map(str, parent_node.child_nodes)) for i in range(1, len(path_node_list)): if", "data:文件数据 :param authority:文件权限 \"\"\" self.id = Tool.uniqueNum() self.file_name = file_name self.parent_node = parent_folder", "new_file = UserFile(name, parent_folder, data) file_table.append(new_file) new_file.disk_position = contiguousAllocation(new_file, Disk) if new_file.disk_position ==", "# 同路径重名 return -1 new_file = UserFile(name, parent_folder, data) file_table.append(new_file) new_file.disk_position = contiguousAllocation(new_file,", "data) file_table.append(new_file) new_file.disk_position = contiguousAllocation(new_file, Disk) if new_file.disk_position == -1: print('磁盘空间分配错误') # TODO:异常处理", "creatFileOrFolder(True, 'root', None, data=None, Disk=disk, file_table=f_table) if not state: state = True default_folder_1", "if parent_folder is not None: for node in parent_folder.child_nodes: if str(node) == name", "= math.ceil(len(content) / 10) target_file.disk_position = contiguousAllocation(target_file, Disk) return 1 def readFile(file_name: str,", ":param target_folder:目标文件夹 :param target_file:目标文件 :return:目标对象的路径 \"\"\" if is_folder and target_folder.folder_name == 'root': return", "1 def writeFile(file_name: str, content: str, root: Folder, Disk: list): \"\"\" 写文件内容(原先内容会删除) :param", "file_table=f_table) default_folder_3 = creatFileOrFolder(True, 'default_folder_3', root_node, data=None, Disk=disk, file_table=f_table) creatFileOrFolder(False, 'test', default_folder_1, data='This", "-1 else: folder_obj.folder_name = new_name return 1 def renameFile(old_name: str, new_name: str, root:", "creatFileOrFolder(True, path_node_list[i], parent_node, data=None, Disk=Disk, file_table=file_table) else: # 不存在问题 if not path_node_list[i] in", "is_folder:欲获取路径的对象是否是文件夹 :param target_folder:目标文件夹 :param target_file:目标文件 :return:目标对象的路径 \"\"\" if is_folder and target_folder.folder_name == 'root':", "self.file_name def contiguousAllocation(file_to_allocated: UserFile, Disk: list): \"\"\" 磁盘文件连续分配 :param Disk: 文件系统磁盘 :param file_to_allocated:需要分配磁盘空间的文件", 
"root:文件系统根节点 :param Disk: 文件系统磁盘 :param file_table: 文件表 :param IR: 直接执行指令 :param path:文件字符串 :return:文件/文件夹对象。若查找错误,返回0", "str, root: Folder): \"\"\" 重命名文件夹 :param root: 文件系统根节点 :param old_name:旧名称 :param new_name:新名称 :return:0表示找不到文件夹,-1表示新名字重名,1表示改名成功", "1 def readFile(file_name: str, root: Folder): \"\"\" 读取文件 :param root: 文件系统根节点 :param file_name:文件名称", "clearFileInDisk(target_file, Disk) target_file.data = content target_file.size = math.ceil(len(content) / 10) target_file.disk_position = contiguousAllocation(target_file,", "in range(1, len(path_node_list)): if i == len(path_node_list) - 1: # 单纯的查询文件目录树 if IR" ]
[ "the program will not remove all intermediary files in the folder tmp after", "(home + \"tmp/\"+job_ID+\"/\") print (true_output) print (counter) print ('###############') ###Calling spades_recipee.py to generate", "os.listdir(true_output+name+\"_no_reduc_busco\"): if e.find(\"specific\") > -1: noredubusco = e break if e.find('short_summary') > -1", "the fraction of nodes set with respect to total existing nodes.') args =", "noredubusco != False: copyfile(true_output+name+\"_no_reduc_busco/\"+noredubusco, true_output+name+\"_no_reduc.busco\") noredubusco = true_output+name+\"_no_reduc.busco\" def parse_no_varcall(no_varcall): vcf, bam, mpileup", "True: print (\"... tmp files havee been kept\") else: print (\"... removed tmp", "exist, it creates it.### if not os.path.isdir(args.output_directory): if os.path.isfile == True: message =", "= os.path.abspath(args.output_directory) if true_output[-1] != \"/\": true_output=true_output+\"/\" print(\"wololo\", true_output) ###Sets RAM usage options###", "required=True, help='Directory where all the output files will be generated. Required.') parser.add_argument('-o', '--output_name',", "a number higher than total, it will use total. If set a number", "df2.to_csv(true_output+\"Report/report\"+name+\".csv\", index=False) ###We clean the tmp directory### if args.keep_tmp == True: existence =", "args.genome_assembler == \"platanus\" or args.genome_assembler == \"Platanus\": if args.no_reduction == True: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+", "of configuration.txt if setting by default### config_path = args.configuration if not args.configuration: selfpath", "help=\"If this tag is active, the program will omit the plots at the", "args.try_again == False: os.rmdir(args.output_directory) os.mkdir(args.output_directory) os.mkdir(path_tmp_jobid) from karyonplots import katplot, allplots katplot(\"\", \"\",", "if line[0] == \"#\": continue elif line[0] == \"+\": prev = line[1:-1] config_dict[prev]", "will use dipSPAdes. Options are: dipSPADEs, SPAdes, SOAPdenovo or Platanus.\") parser.add_argument('-T', '--no_trimming', action='store_true',", "'w') existence.close() print (\"Now I'm cleaning tmp...\") if args.keep_tmp == True: print (\"...but", "the different programs. If false, the program will assign a name consisting of", "config_dict[prev] = [\"\",\"\",\"\"] elif line[0] == \"@\": if config_dict[prev][0] != \"\": continue config_dict[prev][0]", "vcf, reduced_assembly, bam, mpileup, os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output, counter, job_ID,", "\"+name+\" -o \"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\") os.system(\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n \"+name+\" -o", "allplots katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") vcf, bam, mpileup = parse_no_varcall(args.no_varcall) df = allplots(int(args.window_size),", "prepared_libs = os.path.join(path_tmp_jobid, \"prepared_libraries.txt\") ###Checks that the output is not a file. 
If", "from datetime import datetime parser = argparse.ArgumentParser(description=desc, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-d', '--output_directory', required=True, help='Directory", "skip the trimming step.') parser.add_argument('-A', '--no_assembly', default=False, help='If this tag is active it", "import trimming from varcall_recipee import var_call from datetime import datetime parser = argparse.ArgumentParser(description=desc,", "if chunk[5] == \"1\": libstring = libstring + os.path.abspath(chunk[0]) + \" \" +", "if not args.no_trimming: print ('###############') print ('Trimmomatic') print ('###############') if config_dict['trimmomatic'][1] == '':", "library names, including putting absolute paths # libstring = '' backstring = ''", "(champion[-1]) print (config_dict['nQuire'][0]) print (config_dict[\"KAT\"][0]) print (home + \"tmp/\"+job_ID+\"/\") print (true_output) print (counter)", "the main library to use. This is set to accelerate the assembly process", "performing the variant calling protocol.') parser.add_argument('-c', '--configuration', default=False, help=\"Configuration file. By default will", "\"@\": if config_dict[prev][0] != \"\": continue config_dict[prev][0] = line[1:-1] elif line[0] == \">\":", "+ os.path.abspath(chunk[6]) + \" \" elif chunk[5] == \"2\": continue else: backstring =", "assembly software to use. By default it will use dipSPAdes. Options are: dipSPADEs,", "args.no_trimming: print ('###############') print ('Trimmomatic') print ('###############') if config_dict['trimmomatic'][1] == '': trimmo_commands =", "tag is active, the program will skip the trimming step.') parser.add_argument('-A', '--no_assembly', default=False,", "config_dict['SPAdes'][1], True, ram_limit, n_nodes) assembly = true_output+\"spades/scaffolds.fasta\" no_red_assembly = true_output+\"spades/scaffolds.fasta\" elif args.genome_assembler ==", "if args.no_reduction == False and switch == False: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+\" -f \"+ assembly", "total_nodes: ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction) * (float(n_nodes)/total_nodes)) else: ram_limit = args.memory_limit *", "computed steps.') parser.add_argument('-K', '--keep_tmp', action='store_true', default=False, help='If this tag is active, the program", "of the computer during peaks.') parser.add_argument('-n', '--nodes', default=False, help='Number of computation nodes to", "= i.split() if chunk[5] == \"2\": continue else: parse_dict[chunk[0]] = chunk[1:] champion=[0,''] if", "== True: print (\"... tmp files havee been kept\") else: print (\"... 
removed", "elif line[0] == \">\": config_dict[prev][1] = config_dict[prev][1] + line[1:-1] + \" \" elif", "the identifier for intermediate files.') parser.add_argument('-l', '--libraries', required=True, nargs='+', help=\"Fastq libraries to use", "elif args.genome_assembler == \"spades\" or args.genome_assembler == 'SPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1],", "\"+config_dict[\"redundans\"][1]) no_red_assembly = true_output+\"redundans_output/contigs.fa\" assembly = true_output+\"redundans_output/scaffolds.filled.fa\" switch = True elif args.genome_assembler ==", "if not args.configuration: selfpath = os.path.dirname(os.path.realpath(sys.argv[0])) config_path = selfpath[:selfpath.rfind('/')] config_path = selfpath[:selfpath.rfind('/')]+\"/configuration.txt\" true_output", "print (champion[-1]) print (config_dict['nQuire'][0]) print (config_dict[\"KAT\"][0]) print (home + \"tmp/\"+job_ID+\"/\") print (true_output) print", "vcf, bam, mpileup os.mkdir(true_output+\"Report/\") if args.no_varcall == False: from karyonplots import katplot, allplots", "'--genome_assembler', default=\"dipspades\", choices=['dipspades','dipSPAdes','spades', 'SPAdes','platanus','Platanus', 'soapdenovo', 'SOAPdenovo'], help=\"Genome assembly software to use. By default", "args.job_id else id_generator() name = args.output_name if args.output_name else job_ID print ('###############') print", "home = config_dict[\"karyon\"][0] if home[-1] != \"/\": home = home + \"/\" prepared_libs", "of redundans. Remember that the step is used to perform many downstream analyses.", "katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") vcf, bam, mpileup = parse_no_varcall(args.no_varcall) df = allplots(int(args.window_size), vcf,", "will be ommited.') parser.add_argument('-P', '--no_plot', action='store_true', default=False, help=\"If this tag is active, the", "assembly step. It requires a reference assembly.') parser.add_argument('-R', '--no_reduction', action='store_true', default=False, help='If this", "chunk = i.split() if chunk[5] == \"1\": libstring = libstring + os.path.abspath(chunk[0]) +", "counter, job_ID, name, args.scafminsize, args.scafmaxsize, df, args.no_plot) df2 = ploidy_veredict(df, true_output, name, args.window_size)", "by default### config_path = args.configuration if not args.configuration: selfpath = os.path.dirname(os.path.realpath(sys.argv[0])) config_path =", "int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction)) if n_nodes < total_nodes: ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction) *", "open(home + \"tmp/\" + job_ID + '/_keep_existing_', 'w') existence.close() print (\"Now I'm cleaning", "def select_champion(fastq): parse_dict = {} for i in open(fastq): chunk = i.split() if", "programs. If false, the program will assign a name consisting of a string", "6 character string. If prefix name is not defined, it uses job ID###", "if os.path.isfile(i+\".pileup\") == True: mpileup = i+\".pileup\" if i[-4:] == \".vcf\": vcf =", "ignored. 
Required.\") parser.add_argument('-F', '--favourite', default=False, help='Sets one library as the prefered one for", "args.genome_assembler == 'SPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], True, ram_limit, n_nodes) assembly =", "psutil import pysam import pandas as pd import string import random from spades_recipee", "= args.no_assembly if args.no_reduction == False and switch == False: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+\" -f", "'--scafmaxsize', default=False, help=\"Will ignore scaffolds with length above the given threshold\") parser.add_argument('-a', '--try_again',", "call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], False, ram_limit, n_nodes) assembly = true_output+\"dipspades/consensus_contigs.fasta\" no_red_assembly =", "not launch the reduction step of redundans. Remember that the step is used", "elif args.try_again == False: os.rmdir(args.output_directory) os.mkdir(args.output_directory) os.mkdir(path_tmp_jobid) from karyonplots import katplot, allplots katplot(\"\",", "args.no_assembly if args.no_reduction == False and switch == False: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+\" -f \"+", "default### config_path = args.configuration if not args.configuration: selfpath = os.path.dirname(os.path.realpath(sys.argv[0])) config_path = selfpath[:selfpath.rfind('/')]", "print (true_output) print (counter) print ('###############') ###Calling spades_recipee.py to generate the assembly job.", "tasks of the computer during peaks.') parser.add_argument('-n', '--nodes', default=False, help='Number of computation nodes", "args.window_size, mybusco, noredubusco) df2.to_csv(true_output+\"Report/report\"+name+\".csv\", index=False) ###We clean the tmp directory### if args.keep_tmp ==", "'--no_varcall', nargs='+', default=False, help=\"If this tag is active, the program will skip the", "name, args.scafminsize, args.scafmaxsize, args.no_plot) else: from karyonplots import katplot, allplots katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0],", "call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], True, ram_limit, n_nodes) assembly = true_output+\"spades/scaffolds.fasta\" no_red_assembly =", "if args.favourite == False: for element in parse_dict: if int(parse_dict[element][2]) > champion[0]: champion", "args = parser.parse_args() def id_generator(size=6, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for _ in", "###Parses the libraries and checks their parameters for downstream analyses. Also performs trimming.###", "os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output+\"Report/\", counter, job_ID, name, args.scafminsize, args.scafmaxsize, args.no_plot)", "active, the program will omit the plots at the end of the the", "config_dict = parse_config(config_path) home = config_dict[\"karyon\"][0] if home[-1] != \"/\": home = home", "+ \"tmp/\" + job_ID + \"/prepared_libraries.txt\" path_tmp_jobid = os.path.join(home, \"tmp\", job_ID) if not", "file. 
If it does not exist, it creates it.### if not os.path.isdir(args.output_directory): if", "champion = select_champion(prepared_libs) print ('###############') print ('Params') print ('###############') print (args.window_size) print (true_output+name+\".raw.vcf\")", "libstring = libstring + os.path.abspath(chunk[0]) + \" \" + os.path.abspath(chunk[6]) + \" \"", "#karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary*.txt \" + true_output+name+\".busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\") if args.no_reduction ==", "for i in os.listdir(true_output+name+\"_busco\"): if i.find(\"specific\") > -1: mybusco = i break if", "(true_output+\"redundans_output/scaffolds.filled.fa\") print (true_output+name+\".sorted.bam\") print (true_output+name+\".mpileup\") print (champion[-1]) print (config_dict['nQuire'][0]) print (config_dict[\"KAT\"][0]) print (home", "== True: vcf = i+\".vcf\" if os.path.isfile(i+\"raw.vcf\") == True: vcf = i+\"raw.vcf\" return", "== \"SOAPdenovo\": from soap_recipee import soap_recipee soap_recipee(prepared_libs, name, true_output+\"soapdenovo/\", config_dict['SOAPdeNovo'][1], karyonjobfile, config_dict['SOAPdeNovo'][0]) print", "job ID### job_ID = args.job_id if args.job_id else id_generator() name = args.output_name if", "is active, the program will omit the plots at the end of the", "if args.keep_tmp == True: existence = open(home + \"tmp/\" + job_ID + '/_keep_existing_',", "programs.### def parse_config(config): config_dict = {} prev = 0 for line in open(config):", "!= True: from shutil import copyfile mybusco = False for i in os.listdir(true_output+name+\"_busco\"):", "path: '+str(config_path)) print (\"RAM Limit: \"+str(ram_limit)+\"Gb\") print (\"Nodes: \"+str(n_nodes)) print (\"Job ID: \"+str(job_ID))", "-t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1] + \" --noreduction\") no_red_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" else: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+ \"", "mpileup os.mkdir(true_output+\"Report/\") if args.no_varcall == False: from karyonplots import katplot, allplots from report", "prev = line[1:-1] config_dict[prev] = [\"\",\"\",\"\"] elif line[0] == \"@\": if config_dict[prev][0] !=", "assembly + \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) reduced_assembly = true_output+\"redundans_output/scaffolds.filled.fa\"", "('Trimmomatic') print ('###############') if config_dict['trimmomatic'][1] == '': trimmo_commands = '' else: trimmo_commands =", "+ true_output+name+\"_busco/short_summary.specific.*.txt \" + true_output+name+\"_no_reduc.busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\") karyonjobfile.close() #5) Create job file", "an exception an exit the program exit_program(message) else: os.mkdir(args.output_directory) elif args.try_again == False:", "output is not a file. If it does not exist, it creates it.###", "true_output, name, args.favourite, home, str(ram_limit), str(n_nodes), reduced_assembly, no_red_assembly, args.no_reduction) os.system (\"bash \"+true_output+name+\"_karyon.job\") counter", "of the analyses. Default is 1000 (1Kb)') parser.add_argument('-x', '--max_scaf2plot', default=20, help=\"Maximum number of", "to select the assembly program to use### karyonjobfile = open(true_output+name+\"_karyon.job\", 'a') karyonjobfile.write(\"\\n\") switch", "if i[-4:] == \".vcf\": vcf = i if os.path.isfile(i+\".vcf\") == True: vcf =", "be ignored. 
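# The output-directory check above has a subtle bug: `os.path.isfile == True`
# compares the function object itself, so the "path is a file" branch can never
# fire. A corrected sketch of the intended logic (exit_program is the helper
# defined later in the script; try_again mirrors the -a flag):
import os

def ensure_output_dir(output_directory, try_again, exit_program):
    if not os.path.isdir(output_directory):
        if os.path.isfile(output_directory):
            exit_program("Path is a file")  # call the predicate on the path
        else:
            os.mkdir(output_directory)
    elif not try_again:
        # os.rmdir() only removes empty directories; a non-empty previous run
        # would need shutil.rmtree() instead.
        os.rmdir(output_directory)
        os.mkdir(output_directory)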
Required.\") parser.add_argument('-F', '--favourite', default=False, help='Sets one library as the prefered one", "call_SPAdes from prepare_libraries import preparation from trimming_libraries import trimming from varcall_recipee import var_call", "\" \" + i[:-1] karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i \" + reduced_assembly +", "'--keep_tmp', action='store_true', default=False, help='If this tag is active, the program will not remove", "\" elif line[0] == \"?\": if config_dict[prev][2] != \"\": continue config_dict[prev][2] = line[1:-1]", "+ \" -o \" + name + busco_options + \"\\n\") karyonjobfile.write(\"mv \" +", "if mybusco != False: copyfile(true_output+name+\"_busco/\"+mybusco, true_output+name+\".busco\") mybusco = true_output+name+\".busco\" if args.no_reduction != True:", "args.keep_tmp == True: print (\"... tmp files havee been kept\") else: print (\"...", "ploidy_veredict(df, true_output, name, args.window_size) report(true_output, name, df2, args.no_reduction, no_red_assembly, args.window_size, mybusco, noredubusco) df2.to_csv(true_output+\"Report/report\"+name+\".csv\",", "(true_output+name+\".raw.vcf\") print (true_output+\"redundans_output/scaffolds.filled.fa\") print (true_output+name+\".sorted.bam\") print (true_output+name+\".mpileup\") print (champion[-1]) print (config_dict['nQuire'][0]) print (config_dict[\"KAT\"][0])", "below the given threshold\") parser.add_argument('-S', '--scafmaxsize', default=False, help=\"Will ignore scaffolds with length above", "name, df2, args.no_reduction, no_red_assembly, args.window_size, mybusco, noredubusco) df2.to_csv(true_output+\"Report/report\"+name+\".csv\", index=False) ###We clean the tmp", "import sys, os, re import argparse import psutil import pysam import pandas as", "job ID is not user defined, it produces a random 6 character string.", "\"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1] + \" --noreduction\") no_red_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" else: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+", "trimmo_commands, home + \"tmp/\"+job_ID+\"/trimmomatic.job\", true_output, False) os.system(\"bash \" + home + \"tmp/\"+job_ID+\"/trimmomatic.job\") for", "the analyses may not make much sense.') parser.add_argument('-V', '--no_varcall', nargs='+', default=False, help=\"If this", "won't be possible if you skip it.\") parser.add_argument('-B', '--no_busco', default=False, action='store_true', help='If this", "the assembly program to use### karyonjobfile = open(true_output+name+\"_karyon.job\", 'a') karyonjobfile.write(\"\\n\") switch = False", "By default will use ./configuration.txt as the configuration file.\") parser.add_argument('-g', '--genome_assembler', default=\"dipspades\", choices=['dipspades','dipSPAdes','spades',", "elif chunk[5] == \"2\": continue else: backstring = backstring + os.path.abspath(chunk[0]) + \"", "datetime parser = argparse.ArgumentParser(description=desc, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-d', '--output_directory', required=True, help='Directory where all the", "for e in os.listdir(home + \"tmp/\"): for i in os.listdir(home + \"tmp/\"+e): if", "line[1:-1] + \" \" elif line[0] == \"?\": if config_dict[prev][2] != \"\": continue", "karyonplots import katplot, allplots katplot(\"\", \"\", config_dict[\"KAT\"][0], \"\") ###Parses the libraries and checks", "True: existence = open(home + \"tmp/\" + job_ID + 
'/_keep_existing_', 'w') existence.close() print", "'', '', '' for i in no_varcall: if i[-4:] == \".bam\": bam =", "not make much sense.') parser.add_argument('-V', '--no_varcall', nargs='+', default=False, help=\"If this tag is active,", "[0,args.favourite] return champion def exit_program(message): sys.stderr.write(\"\\n%s\\n\\n\"%message) sys.exit(1) def main(): ###Defines the location of", "options### total_nodes = n_nodes = psutil.cpu_count() if args.nodes and int(args.nodes) < total_nodes: n_nodes", "to check the parameters of all the programs.### def parse_config(config): config_dict = {}", "os.path.exists(os.path.join(home, \"tmp\")): os.mkdir(os.path.join(home, \"tmp\")) prepared_libs = os.path.join(path_tmp_jobid, \"prepared_libraries.txt\") ###Checks that the output is", "in os.listdir(home + \"tmp/\"+e): continue else: os.remove(home + \"tmp/\"+e+\"/\"+i) if '_keep_existing_' in os.listdir(home", "assembly job. In the future it should use config file to select the", "import argparse import psutil import pysam import pandas as pd import string import", "the computer during peaks.') parser.add_argument('-n', '--nodes', default=False, help='Number of computation nodes to use.", "file. By default will use ./configuration.txt as the configuration file.\") parser.add_argument('-g', '--genome_assembler', default=\"dipspades\",", "line[0] == \"?\": if config_dict[prev][2] != \"\": continue config_dict[prev][2] = line[1:-1] + \"", "\" + config_dict['trimmomatic'][1] trimming(prepared_libs, config_dict[\"trimmomatic\"][0], trimmo_commands, home + \"tmp/\"+job_ID+\"/trimmomatic.job\", true_output, False) os.system(\"bash \"", "ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction)) if n_nodes < total_nodes: ram_limit = int(psutil.virtual_memory()[0]/1000000000 *", "elif args.genome_assembler == \"platanus\" or args.genome_assembler == \"Platanus\": if args.no_reduction == True: karyonjobfile.write(\"python2", "== True: existence = open(home + \"tmp/\" + job_ID + '/_keep_existing_', 'w') existence.close()", "+ \"tmp/\"+job_ID+\"/\", true_output, counter, job_ID, name, args.scafminsize, args.scafmaxsize, df, args.no_plot) df2 = ploidy_veredict(df,", "selfpath[:selfpath.rfind('/')]+\"/configuration.txt\" true_output = os.path.abspath(args.output_directory) if true_output[-1] != \"/\": true_output=true_output+\"/\" print(\"wololo\", true_output) ###Sets RAM", "return champion def exit_program(message): sys.stderr.write(\"\\n%s\\n\\n\"%message) sys.exit(1) def main(): ###Defines the location of configuration.txt", "if args.no_varcall == False: from karyonplots import katplot, allplots from report import report,", "\"-j \"+true_output+name+\"_karyon.job\") assembly = true_output+\"soapdenovo/\"+name+\".scafSeq\" else: pass else: no_red_assembly = args.no_assembly assembly =", "\"SOAPdenovo\": from soap_recipee import soap_recipee soap_recipee(prepared_libs, name, true_output+\"soapdenovo/\", config_dict['SOAPdeNovo'][1], karyonjobfile, config_dict['SOAPdeNovo'][0]) print (\"python3", "true_output+name+\".sorted.bam\", true_output+name+\".mpileup\", os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output+\"Report/\", counter, job_ID, name, args.scafminsize,", "int(args.max_scaf2plot) if args.no_busco != True: from shutil import copyfile mybusco = False for", "program will assign a name consisting of a string of 6 random alphanumeric", "set a number lower than total, it will calculate memory usage 
based on", "sys.exit(1) def main(): ###Defines the location of configuration.txt if setting by default### config_path", "for scaffold-specific plots. Default is 20.\") parser.add_argument('-s', '--scafminsize', default=False, help=\"Will ignore scaffolds with", "removed tmp files!\") if __name__ == '__main__': t0 = datetime.now() try: main() except", "karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i \" + no_red_assembly + \" -o \" +", "skip it, the analyses may not make much sense.') parser.add_argument('-V', '--no_varcall', nargs='+', default=False,", "soap_recipee(prepared_libs, name, true_output+\"soapdenovo/\", config_dict['SOAPdeNovo'][1], karyonjobfile, config_dict['SOAPdeNovo'][0]) print (\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n \"+name+\"", "print ('###############') print ('Params') print ('###############') print (args.window_size) print (true_output+name+\".raw.vcf\") print (true_output+\"redundans_output/scaffolds.filled.fa\") print", "print ('###############') print ('Preparing libraries') print ('###############') libs = '' for i in", "\"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) no_red_assembly = true_output+\"redundans_output/contigs.fa\" assembly = true_output+\"redundans_output/scaffolds.filled.fa\" switch", "config_dict[\"KAT\"][0], true_output+\"Report/\") vcf, bam, mpileup = parse_no_varcall(args.no_varcall) df = allplots(int(args.window_size), vcf, reduced_assembly, bam,", "it will use total. If set a number lower than total, it will", "print (true_output+\"redundans_output/scaffolds.filled.fa\") print (true_output+name+\".sorted.bam\") print (true_output+name+\".mpileup\") print (champion[-1]) print (config_dict['nQuire'][0]) print (config_dict[\"KAT\"][0]) print", "\"+\": prev = line[1:-1] config_dict[prev] = [\"\",\"\",\"\"] elif line[0] == \"@\": if config_dict[prev][0]", "args.no_reduction == False and switch == False: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+\" -f \"+ assembly +", "program will not launch the reduction step of redundans. Remember that the step", "if true_output[-1] != \"/\": true_output=true_output+\"/\" print(\"wololo\", true_output) ###Sets RAM usage options### total_nodes =", "karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+\" -f \"+ assembly + \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\"", "== \"?\": if config_dict[prev][2] != \"\": continue config_dict[prev][2] = line[1:-1] + \" \"", "os.listdir(true_output+name+\"_busco\"): if i.find(\"specific\") > -1: mybusco = i break if i.find('short_summary') > -1", "== False: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+\" -f \"+ assembly + \" -o \"+true_output+\"redundans_output -i \"+libstring+\"", "for the variant calling analysis. 
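# Restating the memory budget above as a small function (psutil is already
# imported by the script). Total RAM in Gb is scaled by --memory_fraction and,
# when fewer nodes than available are requested, by that node fraction as well.
import psutil

def ram_budget_gb(memory_fraction, n_nodes, total_nodes):
    total_gb = psutil.virtual_memory()[0] / 1000000000
    limit = total_gb * float(memory_fraction)
    if n_nodes < total_nodes:
        limit = limit * (float(n_nodes) / total_nodes)
    return int(limit)

# e.g. ram_budget_gb(1, psutil.cpu_count(), psutil.cpu_count())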
Otherwise, karyon will select the largest library for", "os.system (\"bash \"+true_output+name+\"_karyon.job\") counter = int(args.max_scaf2plot) if args.no_busco != True: from shutil import", "= '' if not args.no_trimming: print ('###############') print ('Trimmomatic') print ('###############') if config_dict['trimmomatic'][1]", "config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output, counter, job_ID, name, args.scafminsize, args.scafmaxsize, df, args.no_plot) df2", "-c \" + config_dict['trimmomatic'][1] trimming(prepared_libs, config_dict[\"trimmomatic\"][0], trimmo_commands, home + \"tmp/\"+job_ID+\"/trimmomatic.job\", true_output, False) os.system(\"bash", "true_output+\"Report/\", counter, job_ID, name, args.scafminsize, args.scafmaxsize, args.no_plot) else: from karyonplots import katplot, allplots", "\"+config_dict[\"redundans\"][1]) reduced_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" elif args.no_reduction == False and switch == True: reduced_assembly", "for i in no_varcall: if i[-4:] == \".bam\": bam = i if os.path.isfile(i+\".bam\")", "= parse_no_varcall(args.no_varcall) df = allplots(int(args.window_size), vcf, reduced_assembly, bam, mpileup, os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home", "= '' else: trimmo_commands = \" -c \" + config_dict['trimmomatic'][1] trimming(prepared_libs, config_dict[\"trimmomatic\"][0], trimmo_commands,", "args.output_name else job_ID print ('###############') print ('Config. path: '+str(config_path)) print (\"RAM Limit: \"+str(ram_limit)+\"Gb\")", "os.listdir(home + \"tmp/\"+e): continue else: os.rmdir(home + \"tmp/\"+e) if args.keep_tmp == True: print", "if i.find('short_summary') > -1 and mybusco != False: if i.find(\"specific\") == -1: mybusco", "generate a random string. This random string will be the same as the", "home + \"tmp/\"+job_ID+\"/trimmomatic.job\") for i in os.listdir(args.output_directory): if i.find(\"parsed_\") > -1: libs_parsed =", "given threshold\") parser.add_argument('-a', '--try_again', default=False, action='store_true', help='Use previous karyon results and skips already", "an exit the program exit_program(message) else: os.mkdir(args.output_directory) elif args.try_again == False: os.rmdir(args.output_directory) os.mkdir(args.output_directory)", "else: pass else: no_red_assembly = args.no_assembly assembly = args.no_assembly if args.no_reduction == False", "parser.add_argument('-B', '--no_busco', default=False, action='store_true', help='If this tag is active, BUSCO analysis will be", "generated by the different programs. If false, the program will assign a name", "job_ID + '/_keep_existing_', 'w') existence.close() print (\"Now I'm cleaning tmp...\") if args.keep_tmp ==", "check the parameters of all the programs.### def parse_config(config): config_dict = {} prev", "n_nodes) assembly = true_output+\"dipspades/consensus_contigs.fasta\" no_red_assembly = true_output+\"dipspades/consensus_contigs.fasta\" elif args.genome_assembler == \"spades\" or args.genome_assembler", "bam = i+\".bam\" if os.path.isfile(i+\".sorted.bam\") == True: bam = i+\".sorted.bam\" if i.find(\"pileup\") >", "args.scafminsize, args.scafmaxsize, df, args.no_plot) df2 = ploidy_veredict(df, true_output, name, args.window_size) report(true_output, name, df2,", "libraries for any of the steps will be ignored. Required.\") parser.add_argument('-F', '--favourite', default=False,", "line[1:-1] + \" \" return config_dict ###Selects the main library to use. 
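# The "largest library wins" fallback described above, as a standalone helper.
# parse_dict mirrors the prepared-libraries table, where index 2 of each value
# holds an integer size field (an assumption taken from select_champion below).
def pick_library(parse_dict, favourite=False):
    if favourite:
        return favourite
    return max(parse_dict, key=lambda k: int(parse_dict[k][2]))

print(pick_library({'a.fastq': ['pe', '1', '10'], 'b.fastq': ['pe', '1', '42']}))  # b.fastq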
This", "if args.no_busco == False: for i in config_dict['BUSCO'][1:]: busco_options = busco_options + \"", "help='Number of computation nodes to use. If set a number higher than total,", "home, str(ram_limit), str(n_nodes), reduced_assembly, no_red_assembly, args.no_reduction) os.system (\"bash \"+true_output+name+\"_karyon.job\") counter = int(args.max_scaf2plot) if", "== True: reduced_assembly = assembly else: reduced_assembly = assembly busco_options = \"\" if", "allplots(int(args.window_size), vcf, reduced_assembly, bam, mpileup, os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output, counter,", "the same as the identifier for intermediate files.') parser.add_argument('-l', '--libraries', required=True, nargs='+', help=\"Fastq", "line[0] == \"+\": prev = line[1:-1] config_dict[prev] = [\"\",\"\",\"\"] elif line[0] == \"@\":", "\"+name+\" -o \"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\") assembly = true_output+\"soapdenovo/\"+name+\".scafSeq\" else: pass else: no_red_assembly", "file in order to check the parameters of all the programs.### def parse_config(config):", "i in open(prepared_libs): chunk = i.split() if chunk[5] == \"1\": libstring = libstring", "parser = argparse.ArgumentParser(description=desc, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-d', '--output_directory', required=True, help='Directory where all the output", "+ \"tmp/\"+job_ID+\"/trimmomatic.job\", true_output, False) os.system(\"bash \" + home + \"tmp/\"+job_ID+\"/trimmomatic.job\") for i in", "i in config_dict['BUSCO'][1:]: busco_options = busco_options + \" \" + i[:-1] karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco", "== \"soapdenovo\" or args.genome_assembler == \"SOAPdenovo\": from soap_recipee import soap_recipee soap_recipee(prepared_libs, name, true_output+\"soapdenovo/\",", "('###############') if config_dict['trimmomatic'][1] == '': trimmo_commands = '' else: trimmo_commands = \" -c", "steps.') parser.add_argument('-K', '--keep_tmp', action='store_true', default=False, help='If this tag is active, the program will", "the steps will be ignored. Required.\") parser.add_argument('-F', '--favourite', default=False, help='Sets one library as", "clean the tmp directory### if args.keep_tmp == True: existence = open(home + \"tmp/\"", "== \"@\": if config_dict[prev][0] != \"\": continue config_dict[prev][0] = line[1:-1] elif line[0] ==", "use ./configuration.txt as the configuration file.\") parser.add_argument('-g', '--genome_assembler', default=\"dipspades\", choices=['dipspades','dipSPAdes','spades', 'SPAdes','platanus','Platanus', 'soapdenovo', 'SOAPdenovo'],", "+ reduced_assembly + \" -o \" + name + busco_options + \"\\n\") karyonjobfile.write(\"mv", "if setting by default### config_path = args.configuration if not args.configuration: selfpath = os.path.dirname(os.path.realpath(sys.argv[0]))", "'SPAdes','platanus','Platanus', 'soapdenovo', 'SOAPdenovo'], help=\"Genome assembly software to use. 
By default it will use", "== \"2\": continue else: backstring = backstring + os.path.abspath(chunk[0]) + \" \" libstring", "config_dict[prev][1] = config_dict[prev][1] + line[1:-1] + \" \" elif line[0] == \"?\": if", "chunk[5] == \"2\": continue else: parse_dict[chunk[0]] = chunk[1:] champion=[0,''] if args.favourite == False:", "+ job_ID + '/_keep_existing_', 'w') existence.close() print (\"Now I'm cleaning tmp...\") if args.keep_tmp", "not exist, it creates it.### if not os.path.isdir(args.output_directory): if os.path.isfile == True: message", "as the prefered one for the variant calling analysis. Otherwise, karyon will select", "'SOAPdenovo'], help=\"Genome assembly software to use. By default it will use dipSPAdes. Options", "= int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction) * (float(n_nodes)/total_nodes)) else: ram_limit = args.memory_limit * int(args.memory_fraction) counter", "been kept\") else: print (\"... removed tmp files!\") if __name__ == '__main__': t0", "for i in open(prepared_libs): chunk = i.split() if chunk[5] == \"1\": libstring =", "files havee been kept\") else: print (\"... removed tmp files!\") if __name__ ==", "'--max_scaf2plot', default=20, help=\"Maximum number of scaffolds to plot for scaffold-specific plots. Default is", "t0 = datetime.now() try: main() except KeyboardInterrupt: sys.stderr.write(\"\\n Ctrl-C pressed! \\n\") dt =", "fraction of nodes set with respect to total existing nodes.') args = parser.parse_args()", "defined, it uses job ID### job_ID = args.job_id if args.job_id else id_generator() name", "os.rmdir(args.output_directory) os.mkdir(args.output_directory) os.mkdir(path_tmp_jobid) from karyonplots import katplot, allplots katplot(\"\", \"\", config_dict[\"KAT\"][0], \"\") ###Parses", "name, args.scafminsize, args.scafmaxsize, df, args.no_plot) df2 = ploidy_veredict(df, true_output, name, args.window_size) report(true_output, name,", "karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i \" + reduced_assembly + \" -o \" + name", "If prefix name is not defined, it uses job ID### job_ID = args.job_id", "switch = False if args.no_assembly == False: if args.genome_assembler == \"dipspades\" or args.genome_assembler", "string import random from spades_recipee import call_SPAdes from prepare_libraries import preparation from trimming_libraries", "This random string will be the same as the identifier for intermediate files.')", "any of the steps will be ignored. Required.\") parser.add_argument('-F', '--favourite', default=False, help='Sets one", "scaffold-specific plots. Default is 20.\") parser.add_argument('-s', '--scafminsize', default=False, help=\"Will ignore scaffolds with length", "False: copyfile(true_output+name+\"_no_reduc_busco/\"+noredubusco, true_output+name+\"_no_reduc.busco\") noredubusco = true_output+name+\"_no_reduc.busco\" def parse_no_varcall(no_varcall): vcf, bam, mpileup = '',", "home + \"/\" prepared_libs = home + \"tmp/\" + job_ID + \"/prepared_libraries.txt\" path_tmp_jobid", "configuration.txt if setting by default### config_path = args.configuration if not args.configuration: selfpath =", "use dipSPAdes. Options are: dipSPADEs, SPAdes, SOAPdenovo or Platanus.\") parser.add_argument('-T', '--no_trimming', action='store_true', default=False,", "if i[-4:] == \".bam\": bam = i if os.path.isfile(i+\".bam\") == True: bam =", "+ \"-i \" + reduced_assembly + \" -o \" + name + busco_options", "peaks.') parser.add_argument('-n', '--nodes', default=False, help='Number of computation nodes to use. 
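# Because the --genome_assembler choices accept several capitalisations
# ('dipspades'/'dipSPAdes', 'spades'/'SPAdes', ...), the branching can be kept
# flat by normalising the choice first. The dictionary values below are
# descriptive placeholders, not the script's real recipe calls.
def assembler_key(choice):
    return choice.lower()

recipes = {
    'dipspades': 'call_SPAdes with the dipspades flag set',
    'spades': 'call_SPAdes with the dipspades flag unset',
    'platanus': 'redundans-driven platanus assembly',
    'soapdenovo': 'soap_recipee job generation',
}
print(recipes[assembler_key('dipSPAdes')])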
If set a", "true_output+\"Report/\") vcf, bam, mpileup = parse_no_varcall(args.no_varcall) df = allplots(int(args.window_size), vcf, reduced_assembly, bam, mpileup,", "import pysam import pandas as pd import string import random from spades_recipee import", "names, including putting absolute paths # libstring = '' backstring = '' for", "calls all the programs if args.no_varcall == False: var_call(prepared_libs, config_dict, true_output, name, args.favourite,", "\"+prepared_libs+\" -n \"+name+\" -o \"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\") assembly = true_output+\"soapdenovo/\"+name+\".scafSeq\" else: pass", "true_output+name+\"_busco/short_summary*.txt \" + true_output+name+\".busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\") if args.no_reduction == False: karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco", "except KeyboardInterrupt: sys.stderr.write(\"\\n Ctrl-C pressed! \\n\") dt = datetime.now()-t0 sys.stderr.write(\"#Time elapsed: %s\\n\" %", "lower than total, it will calculate memory usage based on the fraction of", "help='If this tag is active it will skip the assembly step. It requires", "true_output+name+\".busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\") if args.no_reduction == False: karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i", "bam, mpileup, os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output, counter, job_ID, name, args.scafminsize,", "= libs + \" \" + os.path.abspath(i) preparation(libs.split(), 10000, prepared_libs) libs_parsed = ''", "-1 and mybusco != False: if i.find(\"specific\") == -1: mybusco = i if", "i in os.listdir(home + \"tmp/\"+e): if '_keep_existing_' in os.listdir(home + \"tmp/\"+e): continue else:", "\"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) no_red_assembly = true_output+\"redundans_output/contigs.fa\" assembly = true_output+\"redundans_output/scaffolds.filled.fa\" switch = True elif args.genome_assembler", "6 random alphanumeric characters.') parser.add_argument('-m', '--memory_limit', default=False, help='Memory limit for all the programs", "from prepare_libraries import preparation from trimming_libraries import trimming from varcall_recipee import var_call from", "selfpath = os.path.dirname(os.path.realpath(sys.argv[0])) config_path = selfpath[:selfpath.rfind('/')] config_path = selfpath[:selfpath.rfind('/')]+\"/configuration.txt\" true_output = os.path.abspath(args.output_directory) if", "in os.listdir(args.output_directory): if i.find(\"parsed_\") > -1: libs_parsed = libs_parsed + \" \" +", "('###############') print ('Trimmomatic') print ('###############') if config_dict['trimmomatic'][1] == '': trimmo_commands = '' else:", "champion[0]: champion = [int(parse_dict[element][2]), element] else: champion = [0,args.favourite] return champion def exit_program(message):", "this and won't be possible if you skip it.\") parser.add_argument('-B', '--no_busco', default=False, action='store_true',", "True: mpileup = i+\".pileup\" if i[-4:] == \".vcf\": vcf = i if os.path.isfile(i+\".vcf\")", "plot for scaffold-specific plots. Default is 20.\") parser.add_argument('-s', '--scafminsize', default=False, help=\"Will ignore scaffolds", "library to use. 
This is set to accelerate the assembly process and improve", "accelerate the assembly process and improve the results def select_champion(fastq): parse_dict = {}", "= selfpath[:selfpath.rfind('/')]+\"/configuration.txt\" true_output = os.path.abspath(args.output_directory) if true_output[-1] != \"/\": true_output=true_output+\"/\" print(\"wololo\", true_output) ###Sets", "print (\"Nodes: \"+str(n_nodes)) print (\"Job ID: \"+str(job_ID)) print (\"Job name: \"+str(name)) print ('###############')", "-f \"+ assembly + \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) reduced_assembly", "print (\"RAM Limit: \"+str(ram_limit)+\"Gb\") print (\"Nodes: \"+str(n_nodes)) print (\"Job ID: \"+str(job_ID)) print (\"Job", "else: print (\"... removed tmp files!\") if __name__ == '__main__': t0 = datetime.now()", "improve the results def select_champion(fastq): parse_dict = {} for i in open(fastq): chunk", "help='Sets one library as the prefered one for the variant calling analysis. Otherwise,", "program will skip the trimming step.') parser.add_argument('-A', '--no_assembly', default=False, help='If this tag is", "= i if mybusco != False: copyfile(true_output+name+\"_busco/\"+mybusco, true_output+name+\".busco\") mybusco = true_output+name+\".busco\" if args.no_reduction", "computer during peaks.') parser.add_argument('-n', '--nodes', default=False, help='Number of computation nodes to use. If", "not defined, it uses job ID### job_ID = args.job_id if args.job_id else id_generator()", "Limit: \"+str(ram_limit)+\"Gb\") print (\"Nodes: \"+str(n_nodes)) print (\"Job ID: \"+str(job_ID)) print (\"Job name: \"+str(name))", "('Params') print ('###############') print (args.window_size) print (true_output+name+\".raw.vcf\") print (true_output+\"redundans_output/scaffolds.filled.fa\") print (true_output+name+\".sorted.bam\") print (true_output+name+\".mpileup\")", "\" + os.path.abspath(i) preparation(libs.split(), 10000, prepared_libs) libs_parsed = '' if not args.no_trimming: print", "karyon results and skips already computed steps.') parser.add_argument('-K', '--keep_tmp', action='store_true', default=False, help='If this", "'--job_id', default=False, help='Identifier of the intermediate files generated by the different programs. If", "use by all programs. By default it will use all available memory (default=1),", "above the given threshold\") parser.add_argument('-a', '--try_again', default=False, action='store_true', help='Use previous karyon results and", "If omitted, it will generate a random string. This random string will be", "== \"1\": libstring = libstring + os.path.abspath(chunk[0]) + \" \" + os.path.abspath(chunk[6]) +", "order to check the parameters of all the programs.### def parse_config(config): config_dict =", "all memory available.') parser.add_argument('-M', '--memory_fraction', default=1, help='Proportion of total memory to use by", "'--output_directory', required=True, help='Directory where all the output files will be generated. Required.') parser.add_argument('-o',", "os.listdir(home + \"tmp/\"+e): continue else: os.remove(home + \"tmp/\"+e+\"/\"+i) if '_keep_existing_' in os.listdir(home +", "True: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+ \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1] + \"", "downstream analyses. 
Also performs trimming.### print ('###############') print ('Preparing libraries') print ('###############') libs", "respect to total existing nodes.') args = parser.parse_args() def id_generator(size=6, chars=string.ascii_uppercase + string.digits):", "the step is used to perform many downstream analyses. If you skip it,", "import copyfile mybusco = False for i in os.listdir(true_output+name+\"_busco\"): if i.find(\"specific\") > -1:", "print (args.window_size) print (true_output+name+\".raw.vcf\") print (true_output+\"redundans_output/scaffolds.filled.fa\") print (true_output+name+\".sorted.bam\") print (true_output+name+\".mpileup\") print (champion[-1]) print", "busco_downloads\\n\") karyonjobfile.close() #5) Create job file that calls all the programs if args.no_varcall", "not args.memory_limit: ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction)) if n_nodes < total_nodes: ram_limit =", "print ('###############') print (args.window_size) print (true_output+name+\".raw.vcf\") print (true_output+\"redundans_output/scaffolds.filled.fa\") print (true_output+name+\".sorted.bam\") print (true_output+name+\".mpileup\") print", "[int(parse_dict[element][2]), element] else: champion = [0,args.favourite] return champion def exit_program(message): sys.stderr.write(\"\\n%s\\n\\n\"%message) sys.exit(1) def", "tag is active, BUSCO analysis will be ommited.') parser.add_argument('-P', '--no_plot', action='store_true', default=False, help=\"If", "percent to avoid freezing other tasks of the computer during peaks.') parser.add_argument('-n', '--nodes',", "config_dict[prev][0] != \"\": continue config_dict[prev][0] = line[1:-1] elif line[0] == \">\": config_dict[prev][1] =", "i+\"raw.vcf\" return vcf, bam, mpileup os.mkdir(true_output+\"Report/\") if args.no_varcall == False: from karyonplots import", "reduced_assembly, true_output+name+\".sorted.bam\", true_output+name+\".mpileup\", os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output+\"Report/\", counter, job_ID, name,", "1000 (1Kb)') parser.add_argument('-x', '--max_scaf2plot', default=20, help=\"Maximum number of scaffolds to plot for scaffold-specific", "in open(config): if line[0] == \"#\": continue elif line[0] == \"+\": prev =", "much sense.') parser.add_argument('-V', '--no_varcall', nargs='+', default=False, help=\"If this tag is active, the program", "parser.add_argument('-s', '--scafminsize', default=False, help=\"Will ignore scaffolds with length below the given threshold\") parser.add_argument('-S',", "\">\": config_dict[prev][1] = config_dict[prev][1] + line[1:-1] + \" \" elif line[0] == \"?\":", "10000, prepared_libs) libs_parsed = '' if not args.no_trimming: print ('###############') print ('Trimmomatic') print", "of the steps will be ignored. Required.\") parser.add_argument('-F', '--favourite', default=False, help='Sets one library", "pass else: no_red_assembly = args.no_assembly assembly = args.no_assembly if args.no_reduction == False and", "def exit_program(message): sys.stderr.write(\"\\n%s\\n\\n\"%message) sys.exit(1) def main(): ###Defines the location of configuration.txt if setting", "== True: mpileup = i+\".mpileup\" if os.path.isfile(i+\".pileup\") == True: mpileup = i+\".pileup\" if", "one library as the prefered one for the variant calling analysis. Otherwise, karyon", "limit for all the programs set in Gb. 
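# The "Preparing libraries" step above concatenates absolute paths into a
# space-separated string before calling preparation(). A self-contained sketch
# of that collection step (preparation() itself lives in prepare_libraries):
import os

def collect_libs(libraries):
    libs = ''
    for i in libraries:
        libs = libs + " " + os.path.abspath(i)
    return libs.split()

print(collect_libs(['reads_1.fastq', 'reads_2.fastq']))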
By default it will try", "+ true_output+name+\"_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary*.txt \" + true_output+name+\".busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\") if", "will try to use all memory available.') parser.add_argument('-M', '--memory_fraction', default=1, help='Proportion of total", "true_output+name+\"_busco/short_summary.specific.*.txt \" + true_output+name+\"_no_reduc.busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\") karyonjobfile.close() #5) Create job file that", "ID: \"+str(job_ID)) print (\"Job name: \"+str(name)) print ('###############') config_dict = parse_config(config_path) home =", "to plot for scaffold-specific plots. Default is 20.\") parser.add_argument('-s', '--scafminsize', default=False, help=\"Will ignore", "action='store_true', help='Use previous karyon results and skips already computed steps.') parser.add_argument('-K', '--keep_tmp', action='store_true',", "help=\"Will ignore scaffolds with length below the given threshold\") parser.add_argument('-S', '--scafmaxsize', default=False, help=\"Will", "By default it will use dipSPAdes. Options are: dipSPADEs, SPAdes, SOAPdenovo or Platanus.\")", "!= \"\": continue config_dict[prev][2] = line[1:-1] + \" \" return config_dict ###Selects the", "\"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) reduced_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" elif args.no_reduction == False and switch == True:", "\"tmp/\"): for i in os.listdir(home + \"tmp/\"+e): if '_keep_existing_' in os.listdir(home + \"tmp/\"+e):", "== True: vcf = i+\"raw.vcf\" return vcf, bam, mpileup os.mkdir(true_output+\"Report/\") if args.no_varcall ==", "'--nodes', default=False, help='Number of computation nodes to use. If set a number higher", "(args.window_size) print (true_output+name+\".raw.vcf\") print (true_output+\"redundans_output/scaffolds.filled.fa\") print (true_output+name+\".sorted.bam\") print (true_output+name+\".mpileup\") print (champion[-1]) print (config_dict['nQuire'][0])", "job file that calls all the programs if args.no_varcall == False: var_call(prepared_libs, config_dict,", "in the folder tmp after it has finished') parser.add_argument('-i', '--job_id', default=False, help='Identifier of", "the given threshold\") parser.add_argument('-S', '--scafmaxsize', default=False, help=\"Will ignore scaffolds with length above the", "library for performing the variant calling protocol.') parser.add_argument('-c', '--configuration', default=False, help=\"Configuration file. By", "no_varcall: if i[-4:] == \".bam\": bam = i if os.path.isfile(i+\".bam\") == True: bam", "scaffolds to plot for scaffold-specific plots. 
Default is 20.\") parser.add_argument('-s', '--scafminsize', default=False, help=\"Will", "+ line[1:-1] + \" \" elif line[0] == \"?\": if config_dict[prev][2] != \"\":", "'_keep_existing_' in os.listdir(home + \"tmp/\"+e): continue else: os.rmdir(home + \"tmp/\"+e) if args.keep_tmp ==", "message = \"Path is a file\" #Should raise an exception an exit the", "karyonjobfile.write(\"mv \" + name + \"_no_reduc \" + true_output+name+\"_no_reduc_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary.specific.*.txt", "int(args.nodes) < total_nodes: n_nodes = int(args.nodes) if not args.memory_limit: ram_limit = int(psutil.virtual_memory()[0]/1000000000 *", "float(args.memory_fraction) * (float(n_nodes)/total_nodes)) else: ram_limit = args.memory_limit * int(args.memory_fraction) counter = int(args.max_scaf2plot) ###Sets", "is set to accelerate the assembly process and improve the results def select_champion(fastq):", "\" + name+\"_no_reduc\" + busco_options + \"\\n\") karyonjobfile.write(\"mv \" + name + \"_no_reduc", "assembly = true_output+\"soapdenovo/\"+name+\".scafSeq\" else: pass else: no_red_assembly = args.no_assembly assembly = args.no_assembly if", "generated. Required.') parser.add_argument('-o', '--output_name', default=False, help='Prefix name for all the output files. If", "'--favourite', default=False, help='Sets one library as the prefered one for the variant calling", "exception an exit the program exit_program(message) else: os.mkdir(args.output_directory) elif args.try_again == False: os.rmdir(args.output_directory)", "the programs set in Gb. By default it will try to use all", "config_dict[prev][0] = line[1:-1] elif line[0] == \">\": config_dict[prev][1] = config_dict[prev][1] + line[1:-1] +", "./configuration.txt as the configuration file.\") parser.add_argument('-g', '--genome_assembler', default=\"dipspades\", choices=['dipspades','dipSPAdes','spades', 'SPAdes','platanus','Platanus', 'soapdenovo', 'SOAPdenovo'], help=\"Genome", "all the programs set in Gb. By default it will try to use", "\"/\" prepared_libs = home + \"tmp/\" + job_ID + \"/prepared_libraries.txt\" path_tmp_jobid = os.path.join(home,", "id_generator() name = args.output_name if args.output_name else job_ID print ('###############') print ('Config. 
path:", "config_dict[\"karyon\"][0] if home[-1] != \"/\": home = home + \"/\" prepared_libs = home", "folder tmp after it has finished') parser.add_argument('-i', '--job_id', default=False, help='Identifier of the intermediate", "name, args.window_size) report(true_output, name, df2, args.no_reduction, no_red_assembly, args.window_size, mybusco, noredubusco) df2.to_csv(true_output+\"Report/report\"+name+\".csv\", index=False) ###We", "False: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+\" -f \"+ assembly + \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t", "args.job_id if args.job_id else id_generator() name = args.output_name if args.output_name else job_ID print", "process and improve the results def select_champion(fastq): parse_dict = {} for i in", "identifier for intermediate files.') parser.add_argument('-l', '--libraries', required=True, nargs='+', help=\"Fastq libraries to use for", "true_output+\"dipspades/consensus_contigs.fasta\" elif args.genome_assembler == \"spades\" or args.genome_assembler == 'SPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name,", "-o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) reduced_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" elif args.no_reduction ==", "+ \"/\" prepared_libs = home + \"tmp/\" + job_ID + \"/prepared_libraries.txt\" path_tmp_jobid =", "no_red_assembly, args.no_reduction) os.system (\"bash \"+true_output+name+\"_karyon.job\") counter = int(args.max_scaf2plot) if args.no_busco != True: from", "name, args.favourite, home, str(ram_limit), str(n_nodes), reduced_assembly, no_red_assembly, args.no_reduction) os.system (\"bash \"+true_output+name+\"_karyon.job\") counter =", "args.favourite, home, str(ram_limit), str(n_nodes), reduced_assembly, no_red_assembly, args.no_reduction) os.system (\"bash \"+true_output+name+\"_karyon.job\") counter = int(args.max_scaf2plot)", "use all available memory (default=1), but it may be useful to reduce the", "\"tmp/\"+e+\"/\"+i) if '_keep_existing_' in os.listdir(home + \"tmp/\"+e): continue else: os.rmdir(home + \"tmp/\"+e) if", "it, the analyses may not make much sense.') parser.add_argument('-V', '--no_varcall', nargs='+', default=False, help=\"If", "ID### job_ID = args.job_id if args.job_id else id_generator() name = args.output_name if args.output_name", "if chunk[5] == \"2\": continue else: parse_dict[chunk[0]] = chunk[1:] champion=[0,''] if args.favourite ==", "to use### karyonjobfile = open(true_output+name+\"_karyon.job\", 'a') karyonjobfile.write(\"\\n\") switch = False if args.no_assembly ==", "used for some of the analyses. 
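# The --window_size value feeds the downstream summaries (allplots receives
# int(args.window_size)). A hedged sketch of what windowing a scaffold looks
# like; the half-open (start, end) tuples are illustrative, not the script's
# internal representation.
def windows(scaffold_length, window_size=1000):
    return [(start, min(start + window_size, scaffold_length))
            for start in range(0, scaffold_length, window_size)]

print(windows(2500))  # [(0, 1000), (1000, 2000), (2000, 2500)]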
Default is 1000 (1Kb)') parser.add_argument('-x', '--max_scaf2plot', default=20,", "if '_keep_existing_' in os.listdir(home + \"tmp/\"+e): continue else: os.remove(home + \"tmp/\"+e+\"/\"+i) if '_keep_existing_'", "args.no_plot) else: from karyonplots import katplot, allplots katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") vcf, bam,", "= true_output+\"redundans_output/scaffolds.filled.fa\" elif args.no_reduction == False and switch == True: reduced_assembly = assembly", "= allplots(int(args.window_size), true_output+name+\".raw.vcf\", reduced_assembly, true_output+name+\".sorted.bam\", true_output+name+\".mpileup\", os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output+\"Report/\",", "may be useful to reduce the percent to avoid freezing other tasks of", "total existing nodes.') args = parser.parse_args() def id_generator(size=6, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars)", "their parameters for downstream analyses. Also performs trimming.### print ('###############') print ('Preparing libraries')", "\"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\") assembly = true_output+\"soapdenovo/\"+name+\".scafSeq\" else: pass else: no_red_assembly = args.no_assembly", "keeping what you told me...\") for e in os.listdir(home + \"tmp/\"): for i", "if args.no_reduction == True: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+ \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\"", "#Should raise an exception an exit the program exit_program(message) else: os.mkdir(args.output_directory) elif args.try_again", "the variant calling step. Many downstream analyses require this and won't be possible", "true_output+name+\"_no_reduc.busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\") karyonjobfile.close() #5) Create job file that calls all the", "os.mkdir(args.output_directory) os.mkdir(path_tmp_jobid) from karyonplots import katplot, allplots katplot(\"\", \"\", config_dict[\"KAT\"][0], \"\") ###Parses the", "if not args.memory_limit: ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction)) if n_nodes < total_nodes: ram_limit", "step is used to perform many downstream analyses. If you skip it, the", "i.find(\"specific\") == -1: mybusco = i if mybusco != False: copyfile(true_output+name+\"_busco/\"+mybusco, true_output+name+\".busco\") mybusco", "False for e in os.listdir(true_output+name+\"_no_reduc_busco\"): if e.find(\"specific\") > -1: noredubusco = e break", "are: dipSPADEs, SPAdes, SOAPdenovo or Platanus.\") parser.add_argument('-T', '--no_trimming', action='store_true', default=False, help='If this tag", "assembly = true_output+\"redundans_output/scaffolds.filled.fa\" switch = True elif args.genome_assembler == \"soapdenovo\" or args.genome_assembler ==", "re import argparse import psutil import pysam import pandas as pd import string", "\"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) no_red_assembly = true_output+\"redundans_output/contigs.fa\" assembly = true_output+\"redundans_output/scaffolds.filled.fa\" switch = True", "require this and won't be possible if you skip it.\") parser.add_argument('-B', '--no_busco', default=False,", "chunk[5] == \"1\": libstring = libstring + os.path.abspath(chunk[0]) + \" \" + os.path.abspath(chunk[6])", "will generate a random string. 
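# --max_scaf2plot caps how many scaffolds get per-scaffold plots, while
# --scafminsize/--scafmaxsize (both default False, meaning "no bound on that
# side") filter by length. A combined sketch of those three options:
def scaffolds_to_plot(lengths, max_scaf2plot=20, scafminsize=False, scafmaxsize=False):
    kept = [l for l in lengths
            if (not scafminsize or l >= int(scafminsize))
            and (not scafmaxsize or l <= int(scafmaxsize))]
    return sorted(kept, reverse=True)[:int(max_scaf2plot)]

print(scaffolds_to_plot([500, 2000, 9000, 120], scafminsize=1000))  # [9000, 2000]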
This random string will be the same as", "number lower than total, it will calculate memory usage based on the fraction", "def main(): ###Defines the location of configuration.txt if setting by default### config_path =", "calling analysis. Otherwise, karyon will select the largest library for performing the variant", "higher than total, it will use total. If set a number lower than", "the output files will be generated. Required.') parser.add_argument('-o', '--output_name', default=False, help='Prefix name for", "== False: karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i \" + no_red_assembly + \" -o", "\" \" return config_dict ###Selects the main library to use. This is set", "usage based on the fraction of nodes set with respect to total existing", "possible if you skip it.\") parser.add_argument('-B', '--no_busco', default=False, action='store_true', help='If this tag is", "Platanus.\") parser.add_argument('-T', '--no_trimming', action='store_true', default=False, help='If this tag is active, the program will", "parser.add_argument('-K', '--keep_tmp', action='store_true', default=False, help='If this tag is active, the program will not", "for the job. If job ID is not user defined, it produces a", "performs trimming.### print ('###############') print ('Preparing libraries') print ('###############') libs = '' for", "== -1: mybusco = i if mybusco != False: copyfile(true_output+name+\"_busco/\"+mybusco, true_output+name+\".busco\") mybusco =", "config file to select the assembly program to use### karyonjobfile = open(true_output+name+\"_karyon.job\", 'a')", "files generated by the different programs. If false, the program will assign a", "else: ram_limit = args.memory_limit * int(args.memory_fraction) counter = int(args.max_scaf2plot) ###Sets the job ID", "for i in os.listdir(args.output_directory): if i.find(\"parsed_\") > -1: libs_parsed = libs_parsed + \"", "-1: mpileup = i if os.path.isfile(i+\".mpileup\") == True: mpileup = i+\".mpileup\" if os.path.isfile(i+\".pileup\")", "vcf, bam, mpileup = '', '', '' for i in no_varcall: if i[-4:]", "== 'dipSPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], False, ram_limit, n_nodes) assembly = true_output+\"dipspades/consensus_contigs.fasta\"", "default it will try to use all memory available.') parser.add_argument('-M', '--memory_fraction', default=1, help='Proportion", "args.configuration if not args.configuration: selfpath = os.path.dirname(os.path.realpath(sys.argv[0])) config_path = selfpath[:selfpath.rfind('/')] config_path = selfpath[:selfpath.rfind('/')]+\"/configuration.txt\"", "sys.stderr.write(\"\\n%s\\n\\n\"%message) sys.exit(1) def main(): ###Defines the location of configuration.txt if setting by default###", "used to perform many downstream analyses. If you skip it, the analyses may", "defined, it produces a random 6 character string. If prefix name is not", "'--memory_limit', default=False, help='Memory limit for all the programs set in Gb. By default", "cleaning tmp...\") if args.keep_tmp == True: print (\"...but keeping what you told me...\")", "e in os.listdir(true_output+name+\"_no_reduc_busco\"): if e.find(\"specific\") > -1: noredubusco = e break if e.find('short_summary')", "'--no_busco', default=False, action='store_true', help='If this tag is active, BUSCO analysis will be ommited.')", "###Selects the main library to use. 
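# parse_config() above reads a small line-prefix format: '+' starts a program
# entry, '@' sets its path, '>' appends command-line options, '?' appends a
# comment field, and '#' lines are ignored. An illustrative snippet it would
# accept (the paths and options are assumptions, not shipped defaults):
example_config = """\
#karyon configuration (illustrative)
+trimmomatic
@/opt/trimmomatic/
>ILLUMINACLIP:adapters.fa:2:30:10
?path first, then extra options
"""
# Fed line-by-line into parse_config(), this would yield something like
# {'trimmomatic': ['/opt/trimmomatic/', 'ILLUMINACLIP:adapters.fa:2:30:10 ', ...]}.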
#This is set to accelerate the assembly process and improve the results
def select_champion(fastq):
    parse_dict = {}
    for i in open(fastq):
        chunk = i.split()
        if chunk[5] == "2":
            continue
        else:
            parse_dict[chunk[0]] = chunk[1:]
    champion = [0, '']
    if args.favourite == False:
        for element in parse_dict:
            if int(parse_dict[element][2]) > champion[0]:
                champion = [int(parse_dict[element][2]), element]
    else:
        champion = [0, args.favourite]
    return champion

def exit_program(message):
    sys.stderr.write("\n%s\n\n" % message)
sys.exit(1)", "open(true_output+name+\"_karyon.job\", 'a') karyonjobfile.write(\"\\n\") switch = False if args.no_assembly == False: if args.genome_assembler ==", "this tag is active, the program will not launch the reduction step of", "+ i preparation(libs_parsed.split(), 10000, prepared_libs) ###Parsing library names, including putting absolute paths #", "of total memory to use by all programs. By default it will use", "\" \" elif line[0] == \"?\": if config_dict[prev][2] != \"\": continue config_dict[prev][2] =", "not os.path.exists(os.path.join(home, \"tmp\")): os.mkdir(os.path.join(home, \"tmp\")) prepared_libs = os.path.join(path_tmp_jobid, \"prepared_libraries.txt\") ###Checks that the output", "and the prefix name for the job. If job ID is not user", "('###############') print ('Preparing libraries') print ('###############') libs = '' for i in args.libraries:", "def id_generator(size=6, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for _ in range(size)) ###Parses the", "program will skip the variant calling step. Many downstream analyses require this and", "\" + name + \" \" + true_output+name+\"_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary*.txt \"", "default it will use dipSPAdes. Options are: dipSPADEs, SPAdes, SOAPdenovo or Platanus.\") parser.add_argument('-T',", "i.split() if chunk[5] == \"2\": continue else: parse_dict[chunk[0]] = chunk[1:] champion=[0,''] if args.favourite", "== \"+\": prev = line[1:-1] config_dict[prev] = [\"\",\"\",\"\"] elif line[0] == \"@\": if", "will not remove all intermediary files in the folder tmp after it has", "by all programs. By default it will use all available memory (default=1), but", "\"tmp/\"+e) if args.keep_tmp == True: print (\"... tmp files havee been kept\") else:", "!= \"/\": true_output=true_output+\"/\" print(\"wololo\", true_output) ###Sets RAM usage options### total_nodes = n_nodes =", "= args.output_name if args.output_name else job_ID print ('###############') print ('Config. path: '+str(config_path)) print", "+ name + busco_options + \"\\n\") karyonjobfile.write(\"mv \" + name + \" \"", "If false, the program will assign a name consisting of a string of", "that the output is not a file. 
If it does not exist, it", "parser.add_argument('-P', '--no_plot', action='store_true', default=False, help=\"If this tag is active, the program will omit", "False: os.rmdir(args.output_directory) os.mkdir(args.output_directory) os.mkdir(path_tmp_jobid) from karyonplots import katplot, allplots katplot(\"\", \"\", config_dict[\"KAT\"][0], \"\")", "selfpath[:selfpath.rfind('/')] config_path = selfpath[:selfpath.rfind('/')]+\"/configuration.txt\" true_output = os.path.abspath(args.output_directory) if true_output[-1] != \"/\": true_output=true_output+\"/\" print(\"wololo\",", "= 0 for line in open(config): if line[0] == \"#\": continue elif line[0]", "true_output+\"Report/\") df = allplots(int(args.window_size), true_output+name+\".raw.vcf\", reduced_assembly, true_output+name+\".sorted.bam\", true_output+name+\".mpileup\", os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home +", "os.path.isfile(i+\".sorted.bam\") == True: bam = i+\".sorted.bam\" if i.find(\"pileup\") > -1: mpileup = i", "it will calculate memory usage based on the fraction of nodes set with", "False: var_call(prepared_libs, config_dict, true_output, name, args.favourite, home, str(ram_limit), str(n_nodes), reduced_assembly, no_red_assembly, args.no_reduction) os.system", "If job ID is not user defined, it produces a random 6 character", "False and switch == True: reduced_assembly = assembly else: reduced_assembly = assembly busco_options", "default=False, help='Prefix name for all the output files. If omitted, it will generate", "import psutil import pysam import pandas as pd import string import random from", "nargs='+', help=\"Fastq libraries to use for assembly and variant calling. Unsuitable libraries for", "config_path = args.configuration if not args.configuration: selfpath = os.path.dirname(os.path.realpath(sys.argv[0])) config_path = selfpath[:selfpath.rfind('/')] config_path", "champion def exit_program(message): sys.stderr.write(\"\\n%s\\n\\n\"%message) sys.exit(1) def main(): ###Defines the location of configuration.txt if", "Required.') parser.add_argument('-o', '--output_name', default=False, help='Prefix name for all the output files. If omitted,", "i in args.libraries: libs = libs + \" \" + os.path.abspath(i) preparation(libs.split(), 10000,", "name: \"+str(name)) print ('###############') config_dict = parse_config(config_path) home = config_dict[\"karyon\"][0] if home[-1] !=", "else: parse_dict[chunk[0]] = chunk[1:] champion=[0,''] if args.favourite == False: for element in parse_dict:", "in args.libraries: libs = libs + \" \" + os.path.abspath(i) preparation(libs.split(), 10000, prepared_libs)", "'': trimmo_commands = '' else: trimmo_commands = \" -c \" + config_dict['trimmomatic'][1] trimming(prepared_libs,", "os.system(\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n \"+name+\" -o \"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\") assembly =", "in range(size)) ###Parses the config file in order to check the parameters of", "+ os.path.abspath(chunk[0]) + \" \" + os.path.abspath(chunk[6]) + \" \" elif chunk[5] ==", "and won't be possible if you skip it.\") parser.add_argument('-B', '--no_busco', default=False, action='store_true', help='If", "job. If job ID is not user defined, it produces a random 6", "variant calling step. Many downstream analyses require this and won't be possible if", "calling step. 
Many downstream analyses require this and won't be possible if you", "== False: from karyonplots import katplot, allplots from report import report, ploidy_veredict katplot(reduced_assembly,", "the variant calling step.\") parser.add_argument('-w', '--window_size', default=1000, help='Window size used for some of", "+ \"tmp/\"+job_ID+\"/\") print (true_output) print (counter) print ('###############') ###Calling spades_recipee.py to generate the", "use. If set a number higher than total, it will use total. If", "allplots katplot(\"\", \"\", config_dict[\"KAT\"][0], \"\") ###Parses the libraries and checks their parameters for", "int(args.memory_fraction) counter = int(args.max_scaf2plot) ###Sets the job ID and the prefix name for", "it creates it.### if not os.path.isdir(args.output_directory): if os.path.isfile == True: message = \"Path", "from spades_recipee import call_SPAdes from prepare_libraries import preparation from trimming_libraries import trimming from", "config_dict, true_output, name, args.favourite, home, str(ram_limit), str(n_nodes), reduced_assembly, no_red_assembly, args.no_reduction) os.system (\"bash \"+true_output+name+\"_karyon.job\")", "mpileup = i if os.path.isfile(i+\".mpileup\") == True: mpileup = i+\".mpileup\" if os.path.isfile(i+\".pileup\") ==", "calling step.\") parser.add_argument('-w', '--window_size', default=1000, help='Window size used for some of the analyses.", "print ('###############') libs = '' for i in args.libraries: libs = libs +", "\"2\": continue else: parse_dict[chunk[0]] = chunk[1:] champion=[0,''] if args.favourite == False: for element", "analysis will be ommited.') parser.add_argument('-P', '--no_plot', action='store_true', default=False, help=\"If this tag is active,", "in Gb. By default it will try to use all memory available.') parser.add_argument('-M',", "+ string.digits): return ''.join(random.choice(chars) for _ in range(size)) ###Parses the config file in", "config_dict['SPAdes'][1], False, ram_limit, n_nodes) assembly = true_output+\"dipspades/consensus_contigs.fasta\" no_red_assembly = true_output+\"dipspades/consensus_contigs.fasta\" elif args.genome_assembler ==", "-r \"+prepared_libs+\" -n \"+name+\" -o \"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\") os.system(\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\"", "-r busco_downloads\\n\") if args.no_reduction == False: karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i \" +", "files in the folder tmp after it has finished') parser.add_argument('-i', '--job_id', default=False, help='Identifier", "('###############') config_dict = parse_config(config_path) home = config_dict[\"karyon\"][0] if home[-1] != \"/\": home =", "== True: bam = i+\".bam\" if os.path.isfile(i+\".sorted.bam\") == True: bam = i+\".sorted.bam\" if", "analyses. Also performs trimming.### print ('###############') print ('Preparing libraries') print ('###############') libs =", "###Parsing library names, including putting absolute paths # libstring = '' backstring =", "SOAPdenovo or Platanus.\") parser.add_argument('-T', '--no_trimming', action='store_true', default=False, help='If this tag is active, the", "if config_dict['trimmomatic'][1] == '': trimmo_commands = '' else: trimmo_commands = \" -c \"", "one for the variant calling analysis. 
Otherwise, karyon will select the largest library", "noredubusco = true_output+name+\"_no_reduc.busco\" def parse_no_varcall(no_varcall): vcf, bam, mpileup = '', '', '' for", "random 6 character string. If prefix name is not defined, it uses job", "name is not defined, it uses job ID### job_ID = args.job_id if args.job_id", "\"\\n\") karyonjobfile.write(\"mv \" + name + \"_no_reduc \" + true_output+name+\"_no_reduc_busco\\n\") #karyonjobfile.write(\"cp \" +", "variant calling analysis. Otherwise, karyon will select the largest library for performing the", "default=False, help='Sets one library as the prefered one for the variant calling analysis.", "-o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1] + \" --noreduction\") no_red_assembly = true_output+\"redundans_output/scaffolds.filled.fa\"", "from karyonplots import katplot, allplots katplot(\"\", \"\", config_dict[\"KAT\"][0], \"\") ###Parses the libraries and", "'--output_name', default=False, help='Prefix name for all the output files. If omitted, it will", "reduced_assembly, bam, mpileup, os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output, counter, job_ID, name,", "True: noredubusco = False for e in os.listdir(true_output+name+\"_no_reduc_busco\"): if e.find(\"specific\") > -1: noredubusco", "parser.add_argument('-T', '--no_trimming', action='store_true', default=False, help='If this tag is active, the program will skip", "\"tmp/\"+job_ID+\"/trimmomatic.job\") for i in os.listdir(args.output_directory): if i.find(\"parsed_\") > -1: libs_parsed = libs_parsed +", "active, the program will not launch the reduction step of redundans. Remember that", "katplot(\"\", \"\", config_dict[\"KAT\"][0], \"\") ###Parses the libraries and checks their parameters for downstream", "string.digits): return ''.join(random.choice(chars) for _ in range(size)) ###Parses the config file in order", "mybusco = False for i in os.listdir(true_output+name+\"_busco\"): if i.find(\"specific\") > -1: mybusco =", "if e.find(\"specific\") > -1: noredubusco = e break if e.find('short_summary') > -1 and", "libraries and checks their parameters for downstream analyses. Also performs trimming.### print ('###############')", "os.path.join(path_tmp_jobid, \"prepared_libraries.txt\") ###Checks that the output is not a file. If it does", "\" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) reduced_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" elif args.no_reduction", "(true_output+name+\".sorted.bam\") print (true_output+name+\".mpileup\") print (champion[-1]) print (config_dict['nQuire'][0]) print (config_dict[\"KAT\"][0]) print (home + \"tmp/\"+job_ID+\"/\")", "assembly = true_output+\"spades/scaffolds.fasta\" no_red_assembly = true_output+\"spades/scaffolds.fasta\" elif args.genome_assembler == \"platanus\" or args.genome_assembler ==", "file that calls all the programs if args.no_varcall == False: var_call(prepared_libs, config_dict, true_output,", "index=False) ###We clean the tmp directory### if args.keep_tmp == True: existence = open(home", "(\"RAM Limit: \"+str(ram_limit)+\"Gb\") print (\"Nodes: \"+str(n_nodes)) print (\"Job ID: \"+str(job_ID)) print (\"Job name:", "('###############') ###Calling spades_recipee.py to generate the assembly job. 
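# --- Aside (illustrative sketch, not part of the pipeline): the recipe/job
# pattern used throughout this script is "append shell commands to
# <name>_karyon.job, then run it with bash". The helper below is a
# hypothetical, minimal version of that idea; its name and arguments are
# assumptions, not karyon API.
def _write_job(job_path, commands):
    """Append shell commands to a job file, one command per line."""
    with open(job_path, 'a') as job:
        for cmd in commands:
            job.write(cmd + "\n")
# Usage sketch, mirroring how the script later does os.system("bash ..."):
#   _write_job("demo_karyon.job", ["echo assembly step", "echo busco step"])
#   os.system("bash demo_karyon.job")
# --- end aside ---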
In the future it should", "bam = i+\".sorted.bam\" if i.find(\"pileup\") > -1: mpileup = i if os.path.isfile(i+\".mpileup\") ==", "config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], False, ram_limit, n_nodes) assembly = true_output+\"dipspades/consensus_contigs.fasta\" no_red_assembly = true_output+\"dipspades/consensus_contigs.fasta\"", "it.\") parser.add_argument('-B', '--no_busco', default=False, action='store_true', help='If this tag is active, BUSCO analysis will", "True: print (\"...but keeping what you told me...\") for e in os.listdir(home +", "parser.add_argument('-x', '--max_scaf2plot', default=20, help=\"Maximum number of scaffolds to plot for scaffold-specific plots. Default", "= args.job_id if args.job_id else id_generator() name = args.output_name if args.output_name else job_ID", "= i+\".mpileup\" if os.path.isfile(i+\".pileup\") == True: mpileup = i+\".pileup\" if i[-4:] == \".vcf\":", "= ploidy_veredict(df, true_output, name, args.window_size) report(true_output, name, df2, args.no_reduction, no_red_assembly, args.window_size, mybusco, noredubusco)", "More info at: https://github.com/Gabaldonlab/karyon \"\"\" epilog=\"\"\"Author: <NAME> (<EMAIL>) Worcester MA, 04/Nov/2021\"\"\" import sys,", "!= False: copyfile(true_output+name+\"_busco/\"+mybusco, true_output+name+\".busco\") mybusco = true_output+name+\".busco\" if args.no_reduction != True: noredubusco =", "step. It requires a reference assembly.') parser.add_argument('-R', '--no_reduction', action='store_true', default=False, help='If this tag", "active it will skip the assembly step. It requires a reference assembly.') parser.add_argument('-R',", "may not make much sense.') parser.add_argument('-V', '--no_varcall', nargs='+', default=False, help=\"If this tag is", "print (true_output+name+\".mpileup\") print (champion[-1]) print (config_dict['nQuire'][0]) print (config_dict[\"KAT\"][0]) print (home + \"tmp/\"+job_ID+\"/\") print", "= i+\".vcf\" if os.path.isfile(i+\"raw.vcf\") == True: vcf = i+\"raw.vcf\" return vcf, bam, mpileup", "by the different programs. If false, the program will assign a name consisting", "(\"Now I'm cleaning tmp...\") if args.keep_tmp == True: print (\"...but keeping what you", "from soap_recipee import soap_recipee soap_recipee(prepared_libs, name, true_output+\"soapdenovo/\", config_dict['SOAPdeNovo'][1], karyonjobfile, config_dict['SOAPdeNovo'][0]) print (\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py", "os.path.dirname(os.path.realpath(sys.argv[0])) config_path = selfpath[:selfpath.rfind('/')] config_path = selfpath[:selfpath.rfind('/')]+\"/configuration.txt\" true_output = os.path.abspath(args.output_directory) if true_output[-1] !=", "what you told me...\") for e in os.listdir(home + \"tmp/\"): for i in", "action='store_true', help='If this tag is active, BUSCO analysis will be ommited.') parser.add_argument('-P', '--no_plot',", "= i+\".pileup\" if i[-4:] == \".vcf\": vcf = i if os.path.isfile(i+\".vcf\") == True:", "> -1: mpileup = i if os.path.isfile(i+\".mpileup\") == True: mpileup = i+\".mpileup\" if", "\" + no_red_assembly + \" -o \" + name+\"_no_reduc\" + busco_options + \"\\n\")", "pipeline. 
More info at: https://github.com/Gabaldonlab/karyon \"\"\" epilog=\"\"\"Author: <NAME> (<EMAIL>) Worcester MA, 04/Nov/2021\"\"\" import", "and int(args.nodes) < total_nodes: n_nodes = int(args.nodes) if not args.memory_limit: ram_limit = int(psutil.virtual_memory()[0]/1000000000", "= {} for i in open(fastq): chunk = i.split() if chunk[5] == \"2\":", "prepare_libraries import preparation from trimming_libraries import trimming from varcall_recipee import var_call from datetime", "to use. This is set to accelerate the assembly process and improve the", "print (counter) print ('###############') ###Calling spades_recipee.py to generate the assembly job. In the", "args.no_assembly == False: if args.genome_assembler == \"dipspades\" or args.genome_assembler == 'dipSPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0],", "previous karyon results and skips already computed steps.') parser.add_argument('-K', '--keep_tmp', action='store_true', default=False, help='If", "print (\"Job ID: \"+str(job_ID)) print (\"Job name: \"+str(name)) print ('###############') config_dict = parse_config(config_path)", "true_output, False) os.system(\"bash \" + home + \"tmp/\"+job_ID+\"/trimmomatic.job\") for i in os.listdir(args.output_directory): if", "for all the output files. If omitted, it will generate a random string.", "https://github.com/Gabaldonlab/karyon \"\"\" epilog=\"\"\"Author: <NAME> (<EMAIL>) Worcester MA, 04/Nov/2021\"\"\" import sys, os, re import", "if i.find(\"parsed_\") > -1: libs_parsed = libs_parsed + \" \" + true_output +", "configuration file.\") parser.add_argument('-g', '--genome_assembler', default=\"dipspades\", choices=['dipspades','dipSPAdes','spades', 'SPAdes','platanus','Platanus', 'soapdenovo', 'SOAPdenovo'], help=\"Genome assembly software to", "(\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n \"+name+\" -o \"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\") os.system(\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py", "help=\"Configuration file. By default will use ./configuration.txt as the configuration file.\") parser.add_argument('-g', '--genome_assembler',", "total memory to use by all programs. By default it will use all", "\"platanus\" or args.genome_assembler == \"Platanus\": if args.no_reduction == True: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+ \" -o", "False: copyfile(true_output+name+\"_busco/\"+mybusco, true_output+name+\".busco\") mybusco = true_output+name+\".busco\" if args.no_reduction != True: noredubusco = False", "memory to use by all programs. 
By default it will use all available", "bam, mpileup = parse_no_varcall(args.no_varcall) df = allplots(int(args.window_size), vcf, reduced_assembly, bam, mpileup, os.path.abspath(champion[-1]), config_dict['nQuire'][0],", "import katplot, allplots from report import report, ploidy_veredict katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") df", "\"-i \" + no_red_assembly + \" -o \" + name+\"_no_reduc\" + busco_options +", "\"+config_dict['redundans'][0]+\"redundans.py\"+ \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1] + \" --noreduction\") no_red_assembly", "('Preparing libraries') print ('###############') libs = '' for i in args.libraries: libs =", "-o \"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\") os.system(\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n \"+name+\" -o \"+true_output+\"soapdenovo", "is 20.\") parser.add_argument('-s', '--scafminsize', default=False, help=\"Will ignore scaffolds with length below the given", "programs set in Gb. By default it will try to use all memory", "job. In the future it should use config file to select the assembly", "as the configuration file.\") parser.add_argument('-g', '--genome_assembler', default=\"dipspades\", choices=['dipspades','dipSPAdes','spades', 'SPAdes','platanus','Platanus', 'soapdenovo', 'SOAPdenovo'], help=\"Genome assembly", "print ('Config. path: '+str(config_path)) print (\"RAM Limit: \"+str(ram_limit)+\"Gb\") print (\"Nodes: \"+str(n_nodes)) print (\"Job", "name = args.output_name if args.output_name else job_ID print ('###############') print ('Config. path: '+str(config_path))", "== False and switch == True: reduced_assembly = assembly else: reduced_assembly = assembly", "+ \"_no_reduc \" + true_output+name+\"_no_reduc_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary.specific.*.txt \" + true_output+name+\"_no_reduc.busco\\n\") karyonjobfile.write(\"rm", "if args.no_reduction != True: noredubusco = False for e in os.listdir(true_output+name+\"_no_reduc_busco\"): if e.find(\"specific\")", "default=False, help='If this tag is active, the program will not remove all intermediary", "backstring = backstring + os.path.abspath(chunk[0]) + \" \" libstring = libstring + backstring", "prefix name for the job. 
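# --- Aside: the ram_limit arithmetic just above, restated as a standalone
# sketch. psutil.virtual_memory()[0] is total RAM in bytes; dividing by
# 1000000000 approximates Gb. The function name is an assumption for
# illustration only.
import psutil
def _estimate_ram_limit(memory_fraction=1.0, n_nodes=None):
    total_nodes = psutil.cpu_count()
    if n_nodes is None:
        n_nodes = total_nodes
    gb = psutil.virtual_memory()[0] / 1000000000
    if n_nodes < total_nodes:
        # scale by the fraction of nodes in use, as the script does
        return int(gb * memory_fraction * (float(n_nodes) / total_nodes))
    return int(gb * memory_fraction)
# --- end aside ---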
If job ID is not user defined, it", "os, re import argparse import psutil import pysam import pandas as pd import", "+ true_output+name+\"_no_reduc_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary.specific.*.txt \" + true_output+name+\"_no_reduc.busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\") karyonjobfile.close()", "range(size)) ###Parses the config file in order to check the parameters of all", "True: from shutil import copyfile mybusco = False for i in os.listdir(true_output+name+\"_busco\"): if", "else: champion = [0,args.favourite] return champion def exit_program(message): sys.stderr.write(\"\\n%s\\n\\n\"%message) sys.exit(1) def main(): ###Defines", "args.no_reduction != True: noredubusco = False for e in os.listdir(true_output+name+\"_no_reduc_busco\"): if e.find(\"specific\") >", "i+\".mpileup\" if os.path.isfile(i+\".pileup\") == True: mpileup = i+\".pileup\" if i[-4:] == \".vcf\": vcf", "print (\"Now I'm cleaning tmp...\") if args.keep_tmp == True: print (\"...but keeping what", "== -1: noredubusco = e if noredubusco != False: copyfile(true_output+name+\"_no_reduc_busco/\"+noredubusco, true_output+name+\"_no_reduc.busco\") noredubusco =", "+ \"\\n\") karyonjobfile.write(\"mv \" + name + \" \" + true_output+name+\"_busco\\n\") #karyonjobfile.write(\"cp \"", "active, the program will not remove all intermediary files in the folder tmp", "'' for i in args.libraries: libs = libs + \" \" + os.path.abspath(i)", "this tag is active it will skip the assembly step. It requires a", "true_output) ###Sets RAM usage options### total_nodes = n_nodes = psutil.cpu_count() if args.nodes and", "= int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction)) if n_nodes < total_nodes: ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction)", "By default it will use all available memory (default=1), but it may be", "computation nodes to use. If set a number higher than total, it will", "assembly process and improve the results def select_champion(fastq): parse_dict = {} for i", "(config_dict[\"KAT\"][0]) print (home + \"tmp/\"+job_ID+\"/\") print (true_output) print (counter) print ('###############') ###Calling spades_recipee.py", "= [\"\",\"\",\"\"] elif line[0] == \"@\": if config_dict[prev][0] != \"\": continue config_dict[prev][0] =", "preparation from trimming_libraries import trimming from varcall_recipee import var_call from datetime import datetime", "\"-i \" + reduced_assembly + \" -o \" + name + busco_options +", "reduced_assembly = assembly busco_options = \"\" if args.no_busco == False: for i in", "reduced_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" elif args.no_reduction == False and switch == True: reduced_assembly =", "print ('###############') print ('Trimmomatic') print ('###############') if config_dict['trimmomatic'][1] == '': trimmo_commands = ''", "df2, args.no_reduction, no_red_assembly, args.window_size, mybusco, noredubusco) df2.to_csv(true_output+\"Report/report\"+name+\".csv\", index=False) ###We clean the tmp directory###", "set with respect to total existing nodes.') args = parser.parse_args() def id_generator(size=6, chars=string.ascii_uppercase", "program exit_program(message) else: os.mkdir(args.output_directory) elif args.try_again == False: os.rmdir(args.output_directory) os.mkdir(args.output_directory) os.mkdir(path_tmp_jobid) from karyonplots", "generate the assembly job. 
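# --- Aside: a schematic of the assembler dispatch performed in this block.
# The dict lookup below is illustrative only; the real script uses an
# if/elif chain over args.genome_assembler to decide which recipe writer
# (call_SPAdes, soap_recipee, or a redundans-based run) to invoke.
def _pick_recipe(genome_assembler):
    recipes = {
        'dipspades': 'call_SPAdes (dipSPAdes mode)',
        'spades': 'call_SPAdes (regular mode)',
        'platanus': 'redundans-based assembly',
        'soapdenovo': 'soap_recipee',
    }
    return recipes.get(genome_assembler.lower(), 'unknown assembler')
# --- end aside ---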
In the future it should use config file to", "-1: libs_parsed = libs_parsed + \" \" + true_output + i preparation(libs_parsed.split(), 10000,", "output files will be generated. Required.') parser.add_argument('-o', '--output_name', default=False, help='Prefix name for all", "False, ram_limit, n_nodes) assembly = true_output+\"dipspades/consensus_contigs.fasta\" no_red_assembly = true_output+\"dipspades/consensus_contigs.fasta\" elif args.genome_assembler == \"spades\"", "###Sets the job ID and the prefix name for the job. If job", "vcf = i if os.path.isfile(i+\".vcf\") == True: vcf = i+\".vcf\" if os.path.isfile(i+\"raw.vcf\") ==", "if args.no_busco != True: from shutil import copyfile mybusco = False for i", "variant calling. Unsuitable libraries for any of the steps will be ignored. Required.\")", "total_nodes = n_nodes = psutil.cpu_count() if args.nodes and int(args.nodes) < total_nodes: n_nodes =", "Options are: dipSPADEs, SPAdes, SOAPdenovo or Platanus.\") parser.add_argument('-T', '--no_trimming', action='store_true', default=False, help='If this", "will use all available memory (default=1), but it may be useful to reduce", "config_dict['trimmomatic'][1] == '': trimmo_commands = '' else: trimmo_commands = \" -c \" +", "\"/\": true_output=true_output+\"/\" print(\"wololo\", true_output) ###Sets RAM usage options### total_nodes = n_nodes = psutil.cpu_count()", "is not a file. If it does not exist, it creates it.### if", "\" return config_dict ###Selects the main library to use. This is set to", "-r \"+prepared_libs+\" -n \"+name+\" -o \"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\") assembly = true_output+\"soapdenovo/\"+name+\".scafSeq\" else:", "to use all memory available.') parser.add_argument('-M', '--memory_fraction', default=1, help='Proportion of total memory to", "to avoid freezing other tasks of the computer during peaks.') parser.add_argument('-n', '--nodes', default=False,", "will skip the trimming step.') parser.add_argument('-A', '--no_assembly', default=False, help='If this tag is active", "i+\".sorted.bam\" if i.find(\"pileup\") > -1: mpileup = i if os.path.isfile(i+\".mpileup\") == True: mpileup", "select the assembly program to use### karyonjobfile = open(true_output+name+\"_karyon.job\", 'a') karyonjobfile.write(\"\\n\") switch =", "n_nodes) assembly = true_output+\"spades/scaffolds.fasta\" no_red_assembly = true_output+\"spades/scaffolds.fasta\" elif args.genome_assembler == \"platanus\" or args.genome_assembler", "assign a name consisting of a string of 6 random alphanumeric characters.') parser.add_argument('-m',", "os.mkdir(path_tmp_jobid) from karyonplots import katplot, allplots katplot(\"\", \"\", config_dict[\"KAT\"][0], \"\") ###Parses the libraries", "allplots(int(args.window_size), true_output+name+\".raw.vcf\", reduced_assembly, true_output+name+\".sorted.bam\", true_output+name+\".mpileup\", os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output+\"Report/\", counter,", "args.no_reduction) os.system (\"bash \"+true_output+name+\"_karyon.job\") counter = int(args.max_scaf2plot) if args.no_busco != True: from shutil", "pd import string import random from spades_recipee import call_SPAdes from prepare_libraries import preparation", "n_nodes < total_nodes: ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction) * (float(n_nodes)/total_nodes)) else: ram_limit =", "a random 6 character string. 
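# --- Aside: the "random 6 character string" mentioned above comes from
# id_generator(), defined earlier in this script. A standalone copy is shown
# so the sketch runs on its own (the leading underscore marks it as
# illustrative):
import random, string
def _id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for _ in range(size))
# e.g. job_ID = args.job_id if args.job_id else _id_generator()  # -> 'X4B9QZ'-style
# --- end aside ---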
If prefix name is not defined, it uses", "\"tmp/\"+e): continue else: os.rmdir(home + \"tmp/\"+e) if args.keep_tmp == True: print (\"... tmp", "\"/prepared_libraries.txt\" path_tmp_jobid = os.path.join(home, \"tmp\", job_ID) if not os.path.exists(os.path.join(home, \"tmp\")): os.mkdir(os.path.join(home, \"tmp\")) prepared_libs", "parameters for downstream analyses. Also performs trimming.### print ('###############') print ('Preparing libraries') print", "produces a random 6 character string. If prefix name is not defined, it", "creates it.### if not os.path.isdir(args.output_directory): if os.path.isfile == True: message = \"Path is", "true_output+\"soapdenovo/\", config_dict['SOAPdeNovo'][1], karyonjobfile, config_dict['SOAPdeNovo'][0]) print (\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n \"+name+\" -o \"+true_output+\"soapdenovo", "location of configuration.txt if setting by default### config_path = args.configuration if not args.configuration:", "make much sense.') parser.add_argument('-V', '--no_varcall', nargs='+', default=False, help=\"If this tag is active, the", "karyonjobfile.write(\"mv \" + name + \" \" + true_output+name+\"_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary*.txt", "'--memory_fraction', default=1, help='Proportion of total memory to use by all programs. By default", "= true_output+\"soapdenovo/\"+name+\".scafSeq\" else: pass else: no_red_assembly = args.no_assembly assembly = args.no_assembly if args.no_reduction", "(<EMAIL>) Worcester MA, 04/Nov/2021\"\"\" import sys, os, re import argparse import psutil import", "backstring + os.path.abspath(chunk[0]) + \" \" libstring = libstring + backstring champion =", "if __name__ == '__main__': t0 = datetime.now() try: main() except KeyboardInterrupt: sys.stderr.write(\"\\n Ctrl-C", "10000, prepared_libs) ###Parsing library names, including putting absolute paths # libstring = ''", "+ no_red_assembly + \" -o \" + name+\"_no_reduc\" + busco_options + \"\\n\") karyonjobfile.write(\"mv", "parse_config(config): config_dict = {} prev = 0 for line in open(config): if line[0]", "\" + true_output+name+\"_no_reduc.busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\") karyonjobfile.close() #5) Create job file that calls", "name + \"_no_reduc \" + true_output+name+\"_no_reduc_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary.specific.*.txt \" + true_output+name+\"_no_reduc.busco\\n\")", "-i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) no_red_assembly = true_output+\"redundans_output/contigs.fa\" assembly = true_output+\"redundans_output/scaffolds.filled.fa\" switch =", "= e if noredubusco != False: copyfile(true_output+name+\"_no_reduc_busco/\"+noredubusco, true_output+name+\"_no_reduc.busco\") noredubusco = true_output+name+\"_no_reduc.busco\" def parse_no_varcall(no_varcall):", "path_tmp_jobid = os.path.join(home, \"tmp\", job_ID) if not os.path.exists(os.path.join(home, \"tmp\")): os.mkdir(os.path.join(home, \"tmp\")) prepared_libs =", "and noredubusco != False: if e.find(\"specific\") == -1: noredubusco = e if noredubusco", "busco_options + \"\\n\") karyonjobfile.write(\"mv \" + name + \" \" + true_output+name+\"_busco\\n\") #karyonjobfile.write(\"cp", "the program will assign a name consisting of a string of 6 random", "no_red_assembly = args.no_assembly assembly = args.no_assembly if args.no_reduction == False and switch ==", "args.no_reduction == False: karyonjobfile.write(\"\\n\") 
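# --- Aside: shape of the BUSCO command assembled by the writes that follow.
# A hypothetical helper mirroring the string concatenation used here;
# extra_options corresponds to config_dict['BUSCO'][1:] in the real script,
# and the example path below is invented.
def _busco_command(busco_path, assembly, label, extra_options):
    opts = ''.join(' ' + o for o in extra_options)
    return busco_path + 'busco ' + '-i ' + assembly + ' -o ' + label + opts
# e.g. _busco_command('/opt/busco/bin/', 'scaffolds.fa', 'run1', ['-m genome'])
# --- end aside ---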
karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i \" + no_red_assembly + \"", "be ommited.') parser.add_argument('-P', '--no_plot', action='store_true', default=False, help=\"If this tag is active, the program", "= chunk[1:] champion=[0,''] if args.favourite == False: for element in parse_dict: if int(parse_dict[element][2])", "os.listdir(home + \"tmp/\"): for i in os.listdir(home + \"tmp/\"+e): if '_keep_existing_' in os.listdir(home", "the assembly job. In the future it should use config file to select", "true_output+name+\".raw.vcf\", reduced_assembly, true_output+name+\".sorted.bam\", true_output+name+\".mpileup\", os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output+\"Report/\", counter, job_ID,", "default=False, help='If this tag is active, the program will skip the trimming step.')", "true_output+name+\"_no_reduc_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary.specific.*.txt \" + true_output+name+\"_no_reduc.busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\") karyonjobfile.close() #5)", "\"-j \"+true_output+name+\"_karyon.job\") os.system(\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n \"+name+\" -o \"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\")", "noredubusco = False for e in os.listdir(true_output+name+\"_no_reduc_busco\"): if e.find(\"specific\") > -1: noredubusco =", "if os.path.isfile(i+\".vcf\") == True: vcf = i+\".vcf\" if os.path.isfile(i+\"raw.vcf\") == True: vcf =", "\" + os.path.abspath(chunk[6]) + \" \" elif chunk[5] == \"2\": continue else: backstring", "the assembly step. It requires a reference assembly.') parser.add_argument('-R', '--no_reduction', action='store_true', default=False, help='If", "name, config_dict['SPAdes'][1], False, ram_limit, n_nodes) assembly = true_output+\"dipspades/consensus_contigs.fasta\" no_red_assembly = true_output+\"dipspades/consensus_contigs.fasta\" elif args.genome_assembler", "\" + name + \"_no_reduc \" + true_output+name+\"_no_reduc_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary.specific.*.txt \"", "true_output+name+\".busco\" if args.no_reduction != True: noredubusco = False for e in os.listdir(true_output+name+\"_no_reduc_busco\"): if", "os.path.isfile(i+\".pileup\") == True: mpileup = i+\".pileup\" if i[-4:] == \".vcf\": vcf = i", "'' for i in open(prepared_libs): chunk = i.split() if chunk[5] == \"1\": libstring", "'dipSPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], False, ram_limit, n_nodes) assembly = true_output+\"dipspades/consensus_contigs.fasta\" no_red_assembly", "help=\"Fastq libraries to use for assembly and variant calling. 
Unsuitable libraries for any", "\"+config_dict['redundans'][0]+\"redundans.py\"+\" -f \"+ assembly + \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1])", "\"+ assembly + \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) reduced_assembly =", "prepared_libs) ###Parsing library names, including putting absolute paths # libstring = '' backstring", "True: bam = i+\".bam\" if os.path.isfile(i+\".sorted.bam\") == True: bam = i+\".sorted.bam\" if i.find(\"pileup\")", "true_output+\"dipspades/consensus_contigs.fasta\" no_red_assembly = true_output+\"dipspades/consensus_contigs.fasta\" elif args.genome_assembler == \"spades\" or args.genome_assembler == 'SPAdes': call_SPAdes(prepared_libs,", "if args.output_name else job_ID print ('###############') print ('Config. path: '+str(config_path)) print (\"RAM Limit:", "if you skip it.\") parser.add_argument('-B', '--no_busco', default=False, action='store_true', help='If this tag is active,", "args.configuration: selfpath = os.path.dirname(os.path.realpath(sys.argv[0])) config_path = selfpath[:selfpath.rfind('/')] config_path = selfpath[:selfpath.rfind('/')]+\"/configuration.txt\" true_output = os.path.abspath(args.output_directory)", "is not user defined, it produces a random 6 character string. If prefix", "trimming(prepared_libs, config_dict[\"trimmomatic\"][0], trimmo_commands, home + \"tmp/\"+job_ID+\"/trimmomatic.job\", true_output, False) os.system(\"bash \" + home +", "False: if i.find(\"specific\") == -1: mybusco = i if mybusco != False: copyfile(true_output+name+\"_busco/\"+mybusco,", "default=False, help='Memory limit for all the programs set in Gb. By default it", "-1: noredubusco = e break if e.find('short_summary') > -1 and noredubusco != False:", "programs. By default it will use all available memory (default=1), but it may", "use config file to select the assembly program to use### karyonjobfile = open(true_output+name+\"_karyon.job\",", "trimmo_commands = \" -c \" + config_dict['trimmomatic'][1] trimming(prepared_libs, config_dict[\"trimmomatic\"][0], trimmo_commands, home + \"tmp/\"+job_ID+\"/trimmomatic.job\",", "length below the given threshold\") parser.add_argument('-S', '--scafmaxsize', default=False, help=\"Will ignore scaffolds with length", "step.') parser.add_argument('-A', '--no_assembly', default=False, help='If this tag is active it will skip the", "libraries') print ('###############') libs = '' for i in args.libraries: libs = libs", "args.scafmaxsize, df, args.no_plot) df2 = ploidy_veredict(df, true_output, name, args.window_size) report(true_output, name, df2, args.no_reduction,", "os.mkdir(args.output_directory) elif args.try_again == False: os.rmdir(args.output_directory) os.mkdir(args.output_directory) os.mkdir(path_tmp_jobid) from karyonplots import katplot, allplots", "libs_parsed = libs_parsed + \" \" + true_output + i preparation(libs_parsed.split(), 10000, prepared_libs)", "args.genome_assembler == \"spades\" or args.genome_assembler == 'SPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], True,", "libs + \" \" + os.path.abspath(i) preparation(libs.split(), 10000, prepared_libs) libs_parsed = '' if", "will be ignored. 
Required.\") parser.add_argument('-F', '--favourite', default=False, help='Sets one library as the prefered", "os.path.isdir(args.output_directory): if os.path.isfile == True: message = \"Path is a file\" #Should raise", "backstring champion = select_champion(prepared_libs) print ('###############') print ('Params') print ('###############') print (args.window_size) print", "karyon will select the largest library for performing the variant calling protocol.') parser.add_argument('-c',", "and improve the results def select_champion(fastq): parse_dict = {} for i in open(fastq):", "= '', '', '' for i in no_varcall: if i[-4:] == \".bam\": bam", "will skip the assembly step. It requires a reference assembly.') parser.add_argument('-R', '--no_reduction', action='store_true',", "\"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1] + \" --noreduction\") no_red_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" else:", "args.memory_limit * int(args.memory_fraction) counter = int(args.max_scaf2plot) ###Sets the job ID and the prefix", "04/Nov/2021\"\"\" import sys, os, re import argparse import psutil import pysam import pandas", "os.path.abspath(chunk[6]) + \" \" elif chunk[5] == \"2\": continue else: backstring = backstring", "for e in os.listdir(true_output+name+\"_no_reduc_busco\"): if e.find(\"specific\") > -1: noredubusco = e break if", "noredubusco) df2.to_csv(true_output+\"Report/report\"+name+\".csv\", index=False) ###We clean the tmp directory### if args.keep_tmp == True: existence", "True: reduced_assembly = assembly else: reduced_assembly = assembly busco_options = \"\" if args.no_busco", "or Platanus.\") parser.add_argument('-T', '--no_trimming', action='store_true', default=False, help='If this tag is active, the program", "help='Identifier of the intermediate files generated by the different programs. If false, the", "in os.listdir(home + \"tmp/\"+e): continue else: os.rmdir(home + \"tmp/\"+e) if args.keep_tmp == True:", "name + \" \" + true_output+name+\"_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary*.txt \" + true_output+name+\".busco\\n\")", "args.no_busco == False: for i in config_dict['BUSCO'][1:]: busco_options = busco_options + \" \"", "mpileup = '', '', '' for i in no_varcall: if i[-4:] == \".bam\":", "prev = 0 for line in open(config): if line[0] == \"#\": continue elif", "= args.memory_limit * int(args.memory_fraction) counter = int(args.max_scaf2plot) ###Sets the job ID and the", "if args.no_varcall == False: var_call(prepared_libs, config_dict, true_output, name, args.favourite, home, str(ram_limit), str(n_nodes), reduced_assembly,", "set to accelerate the assembly process and improve the results def select_champion(fastq): parse_dict", "for downstream analyses. Also performs trimming.### print ('###############') print ('Preparing libraries') print ('###############')", "with length above the given threshold\") parser.add_argument('-a', '--try_again', default=False, action='store_true', help='Use previous karyon", "mybusco = true_output+name+\".busco\" if args.no_reduction != True: noredubusco = False for e in", "= False if args.no_assembly == False: if args.genome_assembler == \"dipspades\" or args.genome_assembler ==", "(true_output) print (counter) print ('###############') ###Calling spades_recipee.py to generate the assembly job. 
In", "(float(n_nodes)/total_nodes)) else: ram_limit = args.memory_limit * int(args.memory_fraction) counter = int(args.max_scaf2plot) ###Sets the job", "'__main__': t0 = datetime.now() try: main() except KeyboardInterrupt: sys.stderr.write(\"\\n Ctrl-C pressed! \\n\") dt", "\" elif chunk[5] == \"2\": continue else: backstring = backstring + os.path.abspath(chunk[0]) +", "no_red_assembly = true_output+\"spades/scaffolds.fasta\" elif args.genome_assembler == \"platanus\" or args.genome_assembler == \"Platanus\": if args.no_reduction", "memory available.') parser.add_argument('-M', '--memory_fraction', default=1, help='Proportion of total memory to use by all", "the program will skip the variant calling step. Many downstream analyses require this", "with length below the given threshold\") parser.add_argument('-S', '--scafmaxsize', default=False, help=\"Will ignore scaffolds with", "0 for line in open(config): if line[0] == \"#\": continue elif line[0] ==", "True: message = \"Path is a file\" #Should raise an exception an exit", "== \".vcf\": vcf = i if os.path.isfile(i+\".vcf\") == True: vcf = i+\".vcf\" if", "default=False, help=\"If this tag is active, the program will omit the plots at", "= config_dict[\"karyon\"][0] if home[-1] != \"/\": home = home + \"/\" prepared_libs =", "else id_generator() name = args.output_name if args.output_name else job_ID print ('###############') print ('Config.", "e.find(\"specific\") == -1: noredubusco = e if noredubusco != False: copyfile(true_output+name+\"_no_reduc_busco/\"+noredubusco, true_output+name+\"_no_reduc.busco\") noredubusco", "(\"...but keeping what you told me...\") for e in os.listdir(home + \"tmp/\"): for", "for intermediate files.') parser.add_argument('-l', '--libraries', required=True, nargs='+', help=\"Fastq libraries to use for assembly", "parser.add_argument('-o', '--output_name', default=False, help='Prefix name for all the output files. If omitted, it", "\"#\": continue elif line[0] == \"+\": prev = line[1:-1] config_dict[prev] = [\"\",\"\",\"\"] elif", "true_output + i preparation(libs_parsed.split(), 10000, prepared_libs) ###Parsing library names, including putting absolute paths", "+ \"tmp/\"+e): if '_keep_existing_' in os.listdir(home + \"tmp/\"+e): continue else: os.remove(home + \"tmp/\"+e+\"/\"+i)", "os.path.isfile(i+\".vcf\") == True: vcf = i+\".vcf\" if os.path.isfile(i+\"raw.vcf\") == True: vcf = i+\"raw.vcf\"", "+ backstring champion = select_champion(prepared_libs) print ('###############') print ('Params') print ('###############') print (args.window_size)", "files. If omitted, it will generate a random string. This random string will", "os.mkdir(os.path.join(home, \"tmp\")) prepared_libs = os.path.join(path_tmp_jobid, \"prepared_libraries.txt\") ###Checks that the output is not a", "paths # libstring = '' backstring = '' for i in open(prepared_libs): chunk", "switch == False: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+\" -f \"+ assembly + \" -o \"+true_output+\"redundans_output -i", "{} prev = 0 for line in open(config): if line[0] == \"#\": continue", "config_dict['nQuire'][0], config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output, counter, job_ID, name, args.scafminsize, args.scafmaxsize, df, args.no_plot)", "+ \" \" return config_dict ###Selects the main library to use. This is", "job ID and the prefix name for the job. 
If job ID is", "== True: message = \"Path is a file\" #Should raise an exception an", "than total, it will calculate memory usage based on the fraction of nodes", "import soap_recipee soap_recipee(prepared_libs, name, true_output+\"soapdenovo/\", config_dict['SOAPdeNovo'][1], karyonjobfile, config_dict['SOAPdeNovo'][0]) print (\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\"", "i if os.path.isfile(i+\".bam\") == True: bam = i+\".bam\" if os.path.isfile(i+\".sorted.bam\") == True: bam", "'--scafminsize', default=False, help=\"Will ignore scaffolds with length below the given threshold\") parser.add_argument('-S', '--scafmaxsize',", "not a file. If it does not exist, it creates it.### if not", "intermediate files.') parser.add_argument('-l', '--libraries', required=True, nargs='+', help=\"Fastq libraries to use for assembly and", "true_output+name+\"_no_reduc.busco\" def parse_no_varcall(no_varcall): vcf, bam, mpileup = '', '', '' for i in", "+ \" -o \" + name+\"_no_reduc\" + busco_options + \"\\n\") karyonjobfile.write(\"mv \" +", "True: vcf = i+\"raw.vcf\" return vcf, bam, mpileup os.mkdir(true_output+\"Report/\") if args.no_varcall == False:", "+ \"tmp/\"+e): continue else: os.remove(home + \"tmp/\"+e+\"/\"+i) if '_keep_existing_' in os.listdir(home + \"tmp/\"+e):", "threshold\") parser.add_argument('-a', '--try_again', default=False, action='store_true', help='Use previous karyon results and skips already computed", "parse_dict = {} for i in open(fastq): chunk = i.split() if chunk[5] ==", "True: bam = i+\".sorted.bam\" if i.find(\"pileup\") > -1: mpileup = i if os.path.isfile(i+\".mpileup\")", "var_call from datetime import datetime parser = argparse.ArgumentParser(description=desc, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-d', '--output_directory', required=True,", "(true_output+name+\".mpileup\") print (champion[-1]) print (config_dict['nQuire'][0]) print (config_dict[\"KAT\"][0]) print (home + \"tmp/\"+job_ID+\"/\") print (true_output)", "be the same as the identifier for intermediate files.') parser.add_argument('-l', '--libraries', required=True, nargs='+',", "reduced_assembly + \" -o \" + name + busco_options + \"\\n\") karyonjobfile.write(\"mv \"", "kept\") else: print (\"... 
removed tmp files!\") if __name__ == '__main__': t0 =", "\" \" elif chunk[5] == \"2\": continue else: backstring = backstring + os.path.abspath(chunk[0])", "'a') karyonjobfile.write(\"\\n\") switch = False if args.no_assembly == False: if args.genome_assembler == \"dipspades\"", "= true_output+name+\".busco\" if args.no_reduction != True: noredubusco = False for e in os.listdir(true_output+name+\"_no_reduc_busco\"):", "be possible if you skip it.\") parser.add_argument('-B', '--no_busco', default=False, action='store_true', help='If this tag", "the assembly process and improve the results def select_champion(fastq): parse_dict = {} for", "reduced_assembly, no_red_assembly, args.no_reduction) os.system (\"bash \"+true_output+name+\"_karyon.job\") counter = int(args.max_scaf2plot) if args.no_busco != True:", "same as the identifier for intermediate files.') parser.add_argument('-l', '--libraries', required=True, nargs='+', help=\"Fastq libraries", "action='store_true', default=False, help='If this tag is active, the program will skip the trimming", "args.nodes and int(args.nodes) < total_nodes: n_nodes = int(args.nodes) if not args.memory_limit: ram_limit =", "\"\") ###Parses the libraries and checks their parameters for downstream analyses. Also performs", "= select_champion(prepared_libs) print ('###############') print ('Params') print ('###############') print (args.window_size) print (true_output+name+\".raw.vcf\") print", "args.no_varcall == False: var_call(prepared_libs, config_dict, true_output, name, args.favourite, home, str(ram_limit), str(n_nodes), reduced_assembly, no_red_assembly,", "\" -c \" + config_dict['trimmomatic'][1] trimming(prepared_libs, config_dict[\"trimmomatic\"][0], trimmo_commands, home + \"tmp/\"+job_ID+\"/trimmomatic.job\", true_output, False)", "for assembly and variant calling. 
Unsuitable libraries for any of the steps will", "= true_output+\"redundans_output/scaffolds.filled.fa\" switch = True elif args.genome_assembler == \"soapdenovo\" or args.genome_assembler == \"SOAPdenovo\":", "args.memory_limit: ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction)) if n_nodes < total_nodes: ram_limit = int(psutil.virtual_memory()[0]/1000000000", "mybusco = i if mybusco != False: copyfile(true_output+name+\"_busco/\"+mybusco, true_output+name+\".busco\") mybusco = true_output+name+\".busco\" if", "requires a reference assembly.') parser.add_argument('-R', '--no_reduction', action='store_true', default=False, help='If this tag is active,", "copyfile(true_output+name+\"_no_reduc_busco/\"+noredubusco, true_output+name+\"_no_reduc.busco\") noredubusco = true_output+name+\"_no_reduc.busco\" def parse_no_varcall(no_varcall): vcf, bam, mpileup = '', '',", "is active, BUSCO analysis will be ommited.') parser.add_argument('-P', '--no_plot', action='store_true', default=False, help=\"If this", "= \" -c \" + config_dict['trimmomatic'][1] trimming(prepared_libs, config_dict[\"trimmomatic\"][0], trimmo_commands, home + \"tmp/\"+job_ID+\"/trimmomatic.job\", true_output,", "\"tmp\", job_ID) if not os.path.exists(os.path.join(home, \"tmp\")): os.mkdir(os.path.join(home, \"tmp\")) prepared_libs = os.path.join(path_tmp_jobid, \"prepared_libraries.txt\") ###Checks", "mpileup, os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output, counter, job_ID, name, args.scafminsize, args.scafmaxsize,", "else: reduced_assembly = assembly busco_options = \"\" if args.no_busco == False: for i", "should use config file to select the assembly program to use### karyonjobfile =", "'--configuration', default=False, help=\"Configuration file. By default will use ./configuration.txt as the configuration file.\")", "== \">\": config_dict[prev][1] = config_dict[prev][1] + line[1:-1] + \" \" elif line[0] ==", "False for i in os.listdir(true_output+name+\"_busco\"): if i.find(\"specific\") > -1: mybusco = i break", "var_call(prepared_libs, config_dict, true_output, name, args.favourite, home, str(ram_limit), str(n_nodes), reduced_assembly, no_red_assembly, args.no_reduction) os.system (\"bash", "parser.add_argument('-w', '--window_size', default=1000, help='Window size used for some of the analyses. Default is", "print(\"wololo\", true_output) ###Sets RAM usage options### total_nodes = n_nodes = psutil.cpu_count() if args.nodes", "+ \" \" elif chunk[5] == \"2\": continue else: backstring = backstring +", "break if i.find('short_summary') > -1 and mybusco != False: if i.find(\"specific\") == -1:", "Remember that the step is used to perform many downstream analyses. If you", "false, the program will assign a name consisting of a string of 6", "\"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1] + \" --noreduction\") no_red_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" else: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+ \" -o", "protocol.') parser.add_argument('-c', '--configuration', default=False, help=\"Configuration file. 
By default will use ./configuration.txt as the", "== True: print (\"...but keeping what you told me...\") for e in os.listdir(home", "shutil import copyfile mybusco = False for i in os.listdir(true_output+name+\"_busco\"): if i.find(\"specific\") >", "true_output+\"soapdenovo/\"+name+\".scafSeq\" else: pass else: no_red_assembly = args.no_assembly assembly = args.no_assembly if args.no_reduction ==", "all programs. By default it will use all available memory (default=1), but it", "havee been kept\") else: print (\"... removed tmp files!\") if __name__ == '__main__':", "intermediary files in the folder tmp after it has finished') parser.add_argument('-i', '--job_id', default=False,", "in open(prepared_libs): chunk = i.split() if chunk[5] == \"1\": libstring = libstring +", "prefered one for the variant calling analysis. Otherwise, karyon will select the largest", "+ \" \" + true_output + i preparation(libs_parsed.split(), 10000, prepared_libs) ###Parsing library names,", "os.remove(home + \"tmp/\"+e+\"/\"+i) if '_keep_existing_' in os.listdir(home + \"tmp/\"+e): continue else: os.rmdir(home +", "memory (default=1), but it may be useful to reduce the percent to avoid", "If you skip it, the analyses may not make much sense.') parser.add_argument('-V', '--no_varcall',", "parser.add_argument('-d', '--output_directory', required=True, help='Directory where all the output files will be generated. Required.')", "parser.add_argument('-A', '--no_assembly', default=False, help='If this tag is active it will skip the assembly", "mpileup = parse_no_varcall(args.no_varcall) df = allplots(int(args.window_size), vcf, reduced_assembly, bam, mpileup, os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0],", "skip the assembly step. It requires a reference assembly.') parser.add_argument('-R', '--no_reduction', action='store_true', default=False,", "active, the program will skip the variant calling step. 
Many downstream analyses require", "\"tmp/\"+job_ID+\"/\", true_output, counter, job_ID, name, args.scafminsize, args.scafmaxsize, df, args.no_plot) df2 = ploidy_veredict(df, true_output,", "is active, the program will skip the trimming step.') parser.add_argument('-A', '--no_assembly', default=False, help='If", "== \".bam\": bam = i if os.path.isfile(i+\".bam\") == True: bam = i+\".bam\" if", "no_red_assembly, args.window_size, mybusco, noredubusco) df2.to_csv(true_output+\"Report/report\"+name+\".csv\", index=False) ###We clean the tmp directory### if args.keep_tmp", "+ true_output+name+\"_no_reduc.busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\") karyonjobfile.close() #5) Create job file that calls all", "+ \" \" + os.path.abspath(i) preparation(libs.split(), 10000, prepared_libs) libs_parsed = '' if not", "job_ID) if not os.path.exists(os.path.join(home, \"tmp\")): os.mkdir(os.path.join(home, \"tmp\")) prepared_libs = os.path.join(path_tmp_jobid, \"prepared_libraries.txt\") ###Checks that", "assembly.') parser.add_argument('-R', '--no_reduction', action='store_true', default=False, help='If this tag is active, the program will", "line in open(config): if line[0] == \"#\": continue elif line[0] == \"+\": prev", "variant calling step.\") parser.add_argument('-w', '--window_size', default=1000, help='Window size used for some of the", "* float(args.memory_fraction) * (float(n_nodes)/total_nodes)) else: ram_limit = args.memory_limit * int(args.memory_fraction) counter = int(args.max_scaf2plot)", "= psutil.cpu_count() if args.nodes and int(args.nodes) < total_nodes: n_nodes = int(args.nodes) if not", "\"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n \"+name+\" -o \"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\") os.system(\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r", "args.genome_assembler == 'dipSPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], False, ram_limit, n_nodes) assembly =", "('###############') print (args.window_size) print (true_output+name+\".raw.vcf\") print (true_output+\"redundans_output/scaffolds.filled.fa\") print (true_output+name+\".sorted.bam\") print (true_output+name+\".mpileup\") print (champion[-1])", "print (\"...but keeping what you told me...\") for e in os.listdir(home + \"tmp/\"):", "home + \"tmp/\"+job_ID+\"/\", true_output, counter, job_ID, name, args.scafminsize, args.scafmaxsize, df, args.no_plot) df2 =", "config_dict ###Selects the main library to use. 
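# --- Aside: what "select the largest library" means in select_champion()
# (defined near this comment). A standalone sketch over (name, size) pairs;
# the real function reads the prepared-libraries table, skips reverse mates
# (column 6 == "2"), and compares column 3 as the size field. The tuple
# format here is an assumption for illustration.
def _pick_largest(libraries):
    champion = (0, '')
    for name, size in libraries:
        if int(size) > champion[0]:
            champion = (int(size), name)
    return champion
# e.g. _pick_largest([('libA.fq', 900), ('libB.fq', 1500)]) -> (1500, 'libB.fq')
# --- end aside ---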
This is set to accelerate the", "= '' for i in open(prepared_libs): chunk = i.split() if chunk[5] == \"1\":", "the results def select_champion(fastq): parse_dict = {} for i in open(fastq): chunk =", "exit_program(message) else: os.mkdir(args.output_directory) elif args.try_again == False: os.rmdir(args.output_directory) os.mkdir(args.output_directory) os.mkdir(path_tmp_jobid) from karyonplots import", "return vcf, bam, mpileup os.mkdir(true_output+\"Report/\") if args.no_varcall == False: from karyonplots import katplot,", "config_dict[prev][2] = line[1:-1] + \" \" return config_dict ###Selects the main library to", "consisting of a string of 6 random alphanumeric characters.') parser.add_argument('-m', '--memory_limit', default=False, help='Memory", "print (true_output+name+\".raw.vcf\") print (true_output+\"redundans_output/scaffolds.filled.fa\") print (true_output+name+\".sorted.bam\") print (true_output+name+\".mpileup\") print (champion[-1]) print (config_dict['nQuire'][0]) print", "if home[-1] != \"/\": home = home + \"/\" prepared_libs = home +", "\"+true_output+name+\"_karyon.job\") assembly = true_output+\"soapdenovo/\"+name+\".scafSeq\" else: pass else: no_red_assembly = args.no_assembly assembly = args.no_assembly", "launch the reduction step of redundans. Remember that the step is used to", "if n_nodes < total_nodes: ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction) * (float(n_nodes)/total_nodes)) else: ram_limit", "= parse_config(config_path) home = config_dict[\"karyon\"][0] if home[-1] != \"/\": home = home +", "be useful to reduce the percent to avoid freezing other tasks of the", "libstring + backstring champion = select_champion(prepared_libs) print ('###############') print ('Params') print ('###############') print", "already computed steps.') parser.add_argument('-K', '--keep_tmp', action='store_true', default=False, help='If this tag is active, the", "counter, job_ID, name, args.scafminsize, args.scafmaxsize, args.no_plot) else: from karyonplots import katplot, allplots katplot(reduced_assembly,", "+ \"tmp/\"): for i in os.listdir(home + \"tmp/\"+e): if '_keep_existing_' in os.listdir(home +", "False: if args.genome_assembler == \"dipspades\" or args.genome_assembler == 'dipSPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name,", "select the largest library for performing the variant calling protocol.') parser.add_argument('-c', '--configuration', default=False,", "a random string. This random string will be the same as the identifier", "'--no_trimming', action='store_true', default=False, help='If this tag is active, the program will skip the", "int(parse_dict[element][2]) > champion[0]: champion = [int(parse_dict[element][2]), element] else: champion = [0,args.favourite] return champion", "all intermediary files in the folder tmp after it has finished') parser.add_argument('-i', '--job_id',", "the variant calling protocol.') parser.add_argument('-c', '--configuration', default=False, help=\"Configuration file. 
By default will use", "== \"spades\" or args.genome_assembler == 'SPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], True, ram_limit,", "in os.listdir(home + \"tmp/\"+e): if '_keep_existing_' in os.listdir(home + \"tmp/\"+e): continue else: os.remove(home", "the plots at the end of the the variant calling step.\") parser.add_argument('-w', '--window_size',", "name, config_dict['SPAdes'][1], True, ram_limit, n_nodes) assembly = true_output+\"spades/scaffolds.fasta\" no_red_assembly = true_output+\"spades/scaffolds.fasta\" elif args.genome_assembler", "karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i \" + no_red_assembly + \" -o \" + name+\"_no_reduc\"", "sense.') parser.add_argument('-V', '--no_varcall', nargs='+', default=False, help=\"If this tag is active, the program will", "ignore scaffolds with length above the given threshold\") parser.add_argument('-a', '--try_again', default=False, action='store_true', help='Use", "= i if os.path.isfile(i+\".vcf\") == True: vcf = i+\".vcf\" if os.path.isfile(i+\"raw.vcf\") == True:", "'soapdenovo', 'SOAPdenovo'], help=\"Genome assembly software to use. By default it will use dipSPAdes.", "== False and switch == False: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+\" -f \"+ assembly + \"", "spades_recipee import call_SPAdes from prepare_libraries import preparation from trimming_libraries import trimming from varcall_recipee", "random from spades_recipee import call_SPAdes from prepare_libraries import preparation from trimming_libraries import trimming", "characters.') parser.add_argument('-m', '--memory_limit', default=False, help='Memory limit for all the programs set in Gb.", "default=\"dipspades\", choices=['dipspades','dipSPAdes','spades', 'SPAdes','platanus','Platanus', 'soapdenovo', 'SOAPdenovo'], help=\"Genome assembly software to use. By default it", "for i in config_dict['BUSCO'][1:]: busco_options = busco_options + \" \" + i[:-1] karyonjobfile.write(\"\\n\")", "during peaks.') parser.add_argument('-n', '--nodes', default=False, help='Number of computation nodes to use. If set", "help='Directory where all the output files will be generated. Required.') parser.add_argument('-o', '--output_name', default=False,", "job_ID, name, args.scafminsize, args.scafmaxsize, df, args.no_plot) df2 = ploidy_veredict(df, true_output, name, args.window_size) report(true_output,", "output files. If omitted, it will generate a random string. 
This random string", "true_output+\"redundans_output/contigs.fa\" assembly = true_output+\"redundans_output/scaffolds.filled.fa\" switch = True elif args.genome_assembler == \"soapdenovo\" or args.genome_assembler", "continue elif line[0] == \"+\": prev = line[1:-1] config_dict[prev] = [\"\",\"\",\"\"] elif line[0]", "= int(args.max_scaf2plot) if args.no_busco != True: from shutil import copyfile mybusco = False", "== \"dipspades\" or args.genome_assembler == 'dipSPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], False, ram_limit,", "In the future it should use config file to select the assembly program", "> -1: libs_parsed = libs_parsed + \" \" + true_output + i preparation(libs_parsed.split(),", "os.path.isfile(i+\".bam\") == True: bam = i+\".bam\" if os.path.isfile(i+\".sorted.bam\") == True: bam = i+\".sorted.bam\"", "== 'SPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], True, ram_limit, n_nodes) assembly = true_output+\"spades/scaffolds.fasta\"", "it has finished') parser.add_argument('-i', '--job_id', default=False, help='Identifier of the intermediate files generated by", "df, args.no_plot) df2 = ploidy_veredict(df, true_output, name, args.window_size) report(true_output, name, df2, args.no_reduction, no_red_assembly,", "> -1 and mybusco != False: if i.find(\"specific\") == -1: mybusco = i", "if os.path.isfile(i+\".bam\") == True: bam = i+\".bam\" if os.path.isfile(i+\".sorted.bam\") == True: bam =", "mpileup = i+\".mpileup\" if os.path.isfile(i+\".pileup\") == True: mpileup = i+\".pileup\" if i[-4:] ==", "\" + true_output+name+\"_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary*.txt \" + true_output+name+\".busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\")", "other tasks of the computer during peaks.') parser.add_argument('-n', '--nodes', default=False, help='Number of computation", "\" + true_output+name+\".busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\") if args.no_reduction == False: karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \"", "file to select the assembly program to use### karyonjobfile = open(true_output+name+\"_karyon.job\", 'a') karyonjobfile.write(\"\\n\")", "it.### if not os.path.isdir(args.output_directory): if os.path.isfile == True: message = \"Path is a", "i if os.path.isfile(i+\".mpileup\") == True: mpileup = i+\".mpileup\" if os.path.isfile(i+\".pileup\") == True: mpileup", "no_red_assembly = true_output+\"redundans_output/contigs.fa\" assembly = true_output+\"redundans_output/scaffolds.filled.fa\" switch = True elif args.genome_assembler == \"soapdenovo\"", "karyonjobfile.write(\"\\n\") switch = False if args.no_assembly == False: if args.genome_assembler == \"dipspades\" or", "\"+true_output+name+\"_karyon.job\") counter = int(args.max_scaf2plot) if args.no_busco != True: from shutil import copyfile mybusco", "\"\\n\") karyonjobfile.write(\"mv \" + name + \" \" + true_output+name+\"_busco\\n\") #karyonjobfile.write(\"cp \" +", "from trimming_libraries import trimming from varcall_recipee import var_call from datetime import datetime parser", "for element in parse_dict: if int(parse_dict[element][2]) > champion[0]: champion = [int(parse_dict[element][2]), element] else:", "all the output files. If omitted, it will generate a random string. This", "the program will not launch the reduction step of redundans. 
Remember that the", "tmp...\") if args.keep_tmp == True: print (\"...but keeping what you told me...\") for", "that the step is used to perform many downstream analyses. If you skip", "no_red_assembly + \" -o \" + name+\"_no_reduc\" + busco_options + \"\\n\") karyonjobfile.write(\"mv \"", "total. If set a number lower than total, it will calculate memory usage", "is used to perform many downstream analyses. If you skip it, the analyses", "champion=[0,''] if args.favourite == False: for element in parse_dict: if int(parse_dict[element][2]) > champion[0]:", "random string will be the same as the identifier for intermediate files.') parser.add_argument('-l',", "-1: mybusco = i break if i.find('short_summary') > -1 and mybusco != False:", "+ \"tmp/\"+e) if args.keep_tmp == True: print (\"... tmp files havee been kept\")", "at: https://github.com/Gabaldonlab/karyon \"\"\" epilog=\"\"\"Author: <NAME> (<EMAIL>) Worcester MA, 04/Nov/2021\"\"\" import sys, os, re", "import pandas as pd import string import random from spades_recipee import call_SPAdes from", "program will omit the plots at the end of the the variant calling", "the percent to avoid freezing other tasks of the computer during peaks.') parser.add_argument('-n',", "\" -o \" + name+\"_no_reduc\" + busco_options + \"\\n\") karyonjobfile.write(\"mv \" + name", "for any of the steps will be ignored. Required.\") parser.add_argument('-F', '--favourite', default=False, help='Sets", "'+str(config_path)) print (\"RAM Limit: \"+str(ram_limit)+\"Gb\") print (\"Nodes: \"+str(n_nodes)) print (\"Job ID: \"+str(job_ID)) print", "\"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n \"+name+\" -o \"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\") assembly = true_output+\"soapdenovo/\"+name+\".scafSeq\"", "from karyonplots import katplot, allplots katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") vcf, bam, mpileup =", "df = allplots(int(args.window_size), vcf, reduced_assembly, bam, mpileup, os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\",", "os.path.abspath(i) preparation(libs.split(), 10000, prepared_libs) libs_parsed = '' if not args.no_trimming: print ('###############') print", "os.rmdir(home + \"tmp/\"+e) if args.keep_tmp == True: print (\"... 
tmp files havee been", "Many downstream analyses require this and won't be possible if you skip it.\")", "Otherwise, karyon will select the largest library for performing the variant calling protocol.')", "assembly else: reduced_assembly = assembly busco_options = \"\" if args.no_busco == False: for", "true_output+name+\".busco\") mybusco = true_output+name+\".busco\" if args.no_reduction != True: noredubusco = False for e", "= e break if e.find('short_summary') > -1 and noredubusco != False: if e.find(\"specific\")", "elif line[0] == \"+\": prev = line[1:-1] config_dict[prev] = [\"\",\"\",\"\"] elif line[0] ==", "and switch == False: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+\" -f \"+ assembly + \" -o \"+true_output+\"redundans_output", "scaffolds with length above the given threshold\") parser.add_argument('-a', '--try_again', default=False, action='store_true', help='Use previous", "karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i \" + reduced_assembly + \" -o \" +", "if args.genome_assembler == \"dipspades\" or args.genome_assembler == 'dipSPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1],", "continue else: os.rmdir(home + \"tmp/\"+e) if args.keep_tmp == True: print (\"... tmp files", "a reference assembly.') parser.add_argument('-R', '--no_reduction', action='store_true', default=False, help='If this tag is active, the", "config_dict['SOAPdeNovo'][1], karyonjobfile, config_dict['SOAPdeNovo'][0]) print (\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n \"+name+\" -o \"+true_output+\"soapdenovo \"+", "n_nodes = psutil.cpu_count() if args.nodes and int(args.nodes) < total_nodes: n_nodes = int(args.nodes) if", "default=False, help=\"Will ignore scaffolds with length above the given threshold\") parser.add_argument('-a', '--try_again', default=False,", "+ \" \" + true_output+name+\"_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary*.txt \" + true_output+name+\".busco\\n\") karyonjobfile.write(\"rm", "import preparation from trimming_libraries import trimming from varcall_recipee import var_call from datetime import", "varcall_recipee import var_call from datetime import datetime parser = argparse.ArgumentParser(description=desc, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-d',", "the parameters of all the programs.### def parse_config(config): config_dict = {} prev =", "files will be generated. Required.') parser.add_argument('-o', '--output_name', default=False, help='Prefix name for all the", "return ''.join(random.choice(chars) for _ in range(size)) ###Parses the config file in order to", "active, the program will skip the trimming step.') parser.add_argument('-A', '--no_assembly', default=False, help='If this", "info at: https://github.com/Gabaldonlab/karyon \"\"\" epilog=\"\"\"Author: <NAME> (<EMAIL>) Worcester MA, 04/Nov/2021\"\"\" import sys, os,", "libraries to use for assembly and variant calling. Unsuitable libraries for any of", "plots. Default is 20.\") parser.add_argument('-s', '--scafminsize', default=False, help=\"Will ignore scaffolds with length below", "for all the programs set in Gb. 
By default it will try to", "+ \"tmp/\" + job_ID + '/_keep_existing_', 'w') existence.close() print (\"Now I'm cleaning tmp...\")", "os.mkdir(true_output+\"Report/\") if args.no_varcall == False: from karyonplots import katplot, allplots from report import", "available.') parser.add_argument('-M', '--memory_fraction', default=1, help='Proportion of total memory to use by all programs.", "the future it should use config file to select the assembly program to", "library as the prefered one for the variant calling analysis. Otherwise, karyon will", "downstream analyses. If you skip it, the analyses may not make much sense.')", "help='If this tag is active, the program will skip the trimming step.') parser.add_argument('-A',", "###We clean the tmp directory### if args.keep_tmp == True: existence = open(home +", "argparse import psutil import pysam import pandas as pd import string import random", "+ config_dict['trimmomatic'][1] trimming(prepared_libs, config_dict[\"trimmomatic\"][0], trimmo_commands, home + \"tmp/\"+job_ID+\"/trimmomatic.job\", true_output, False) os.system(\"bash \" +", "the programs.### def parse_config(config): config_dict = {} prev = 0 for line in", "will be generated. Required.') parser.add_argument('-o', '--output_name', default=False, help='Prefix name for all the output", "it will skip the assembly step. It requires a reference assembly.') parser.add_argument('-R', '--no_reduction',", "select_champion(prepared_libs) print ('###############') print ('Params') print ('###############') print (args.window_size) print (true_output+name+\".raw.vcf\") print (true_output+\"redundans_output/scaffolds.filled.fa\")", "-o \" + name+\"_no_reduc\" + busco_options + \"\\n\") karyonjobfile.write(\"mv \" + name +", "config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], True, ram_limit, n_nodes) assembly = true_output+\"spades/scaffolds.fasta\" no_red_assembly = true_output+\"spades/scaffolds.fasta\"", "end of the the variant calling step.\") parser.add_argument('-w', '--window_size', default=1000, help='Window size used", "parser.add_argument('-c', '--configuration', default=False, help=\"Configuration file. By default will use ./configuration.txt as the configuration", "prepared_libs = home + \"tmp/\" + job_ID + \"/prepared_libraries.txt\" path_tmp_jobid = os.path.join(home, \"tmp\",", "> -1: mybusco = i break if i.find('short_summary') > -1 and mybusco !=", "this tag is active, the program will skip the trimming step.') parser.add_argument('-A', '--no_assembly',", "-t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) reduced_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" elif args.no_reduction == False and switch ==", "''.join(random.choice(chars) for _ in range(size)) ###Parses the config file in order to check", "main library to use. This is set to accelerate the assembly process and", "parser.add_argument('-n', '--nodes', default=False, help='Number of computation nodes to use. If set a number", "total, it will use total. If set a number lower than total, it", "###Checks that the output is not a file. 
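# Example invocation (illustrative only -- the script and file names below are
# hypothetical; the flags are the ones defined above):
#
#   python3 karyon.py -d /path/to/output -o mysample \
#       -l reads_1.fastq reads_2.fastq -g dipspades -m 64 -n 8
#
# This would assemble the two hypothetical libraries with dipSPAdes on 8 nodes
# with a 64 Gb memory cap, writing everything under /path/to/output.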
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for _ in range(size))

###Parses the config file in order to check the parameters of all the programs.###
def parse_config(config):
    config_dict = {}
    prev = 0
    for line in open(config):
        if line[0] == "#":
            continue
        elif line[0] == "+":
            prev = line[1:-1]
            config_dict[prev] = ["", "", ""]
        elif line[0] == "@":
            if config_dict[prev][0] != "":
                continue
            config_dict[prev][0] = line[1:-1]
        elif line[0] == ">":
            config_dict[prev][1] = config_dict[prev][1] + line[1:-1] + " "
        elif line[0] == "?":
            if config_dict[prev][2] != "":
                continue
            config_dict[prev][2] = line[1:-1] + " "
    return config_dict
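# Illustrative sketch of the configuration file that parse_config() expects,
# inferred from the prefix handling above (the program name, path and options
# shown here are placeholders, not the real defaults):
#
#   # comment lines are ignored
#   +SPAdes                  <- "+" opens a program block
#   @/usr/local/spades/bin/  <- "@" sets the binary path (first one wins)
#   >--careful               <- ">" lines accumulate as extra options
#   >-k 21,33,55
#   ?3.15.5                  <- "?" fills the third field (first one wins)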
###Selects the main library to use. This is set to accelerate the assembly process and [...] the results.###
def select_champion(fastq):
    parse_dict = {}
    for i in open(fastq):
        chunk = i.split()
        if chunk[5] == "2":
            continue
        else:
            parse_dict[chunk[0]] = chunk[1:]
    champion = [0, '']
    if args.favourite == False:
        for element in parse_dict:
            if int(parse_dict[element][2]) > champion[0]:
                champion = [int(parse_dict[element][2]), element]
    else:
        champion = [0, args.favourite]
    return champion

def exit_program(message):
    sys.stderr.write("\n%s\n\n" % message)
    sys.exit(1)
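# The prepared_libraries.txt consumed by select_champion() is produced by
# prepare_libraries.py, which is not shown here; the column semantics below are
# therefore inferred only from the indices this script reads. Per
# whitespace-separated line: column 0 is the library path, column 3 is the
# numeric size-like field used to pick the "champion" library, column 5 flags
# pairing ("1" = first mate, "2" = second mate, anything else = unpaired), and
# column 6 holds the mate's path. The remaining columns are not read here.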
"trimming_libraries import trimming from varcall_recipee import var_call from datetime import datetime parser =", "the configuration file.\") parser.add_argument('-g', '--genome_assembler', default=\"dipspades\", choices=['dipspades','dipSPAdes','spades', 'SPAdes','platanus','Platanus', 'soapdenovo', 'SOAPdenovo'], help=\"Genome assembly software", "+ busco_options + \"\\n\") karyonjobfile.write(\"mv \" + name + \" \" + true_output+name+\"_busco\\n\")", "file\" #Should raise an exception an exit the program exit_program(message) else: os.mkdir(args.output_directory) elif", "i in no_varcall: if i[-4:] == \".bam\": bam = i if os.path.isfile(i+\".bam\") ==", "> champion[0]: champion = [int(parse_dict[element][2]), element] else: champion = [0,args.favourite] return champion def", "== \"platanus\" or args.genome_assembler == \"Platanus\": if args.no_reduction == True: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+ \"", "line[1:-1] elif line[0] == \">\": config_dict[prev][1] = config_dict[prev][1] + line[1:-1] + \" \"", "for i in os.listdir(home + \"tmp/\"+e): if '_keep_existing_' in os.listdir(home + \"tmp/\"+e): continue", "parse_no_varcall(no_varcall): vcf, bam, mpileup = '', '', '' for i in no_varcall: if", "= i+\"raw.vcf\" return vcf, bam, mpileup os.mkdir(true_output+\"Report/\") if args.no_varcall == False: from karyonplots", "= true_output+\"spades/scaffolds.fasta\" no_red_assembly = true_output+\"spades/scaffolds.fasta\" elif args.genome_assembler == \"platanus\" or args.genome_assembler == \"Platanus\":", "directory### if args.keep_tmp == True: existence = open(home + \"tmp/\" + job_ID +", "open(fastq): chunk = i.split() if chunk[5] == \"2\": continue else: parse_dict[chunk[0]] = chunk[1:]", "use total. If set a number lower than total, it will calculate memory", "of all the programs.### def parse_config(config): config_dict = {} prev = 0 for", "\"tmp\")): os.mkdir(os.path.join(home, \"tmp\")) prepared_libs = os.path.join(path_tmp_jobid, \"prepared_libraries.txt\") ###Checks that the output is not", "+ \" \" + i[:-1] karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i \" + reduced_assembly", "prepared_libs) libs_parsed = '' if not args.no_trimming: print ('###############') print ('Trimmomatic') print ('###############')", "true_output+\"spades/scaffolds.fasta\" no_red_assembly = true_output+\"spades/scaffolds.fasta\" elif args.genome_assembler == \"platanus\" or args.genome_assembler == \"Platanus\": if", "tmp directory### if args.keep_tmp == True: existence = open(home + \"tmp/\" + job_ID", "will be the same as the identifier for intermediate files.') parser.add_argument('-l', '--libraries', required=True,", "str(n_nodes), reduced_assembly, no_red_assembly, args.no_reduction) os.system (\"bash \"+true_output+name+\"_karyon.job\") counter = int(args.max_scaf2plot) if args.no_busco !=", "print (config_dict['nQuire'][0]) print (config_dict[\"KAT\"][0]) print (home + \"tmp/\"+job_ID+\"/\") print (true_output) print (counter) print", "of 6 random alphanumeric characters.') parser.add_argument('-m', '--memory_limit', default=False, help='Memory limit for all the", "omit the plots at the end of the the variant calling step.\") parser.add_argument('-w',", "of the the variant calling step.\") parser.add_argument('-w', '--window_size', default=1000, help='Window size used for", "= line[1:-1] + \" \" return config_dict ###Selects the main library to use.", "print (home + \"tmp/\"+job_ID+\"/\") print (true_output) print 
(counter) print ('###############') ###Calling spades_recipee.py to", "args.no_busco != True: from shutil import copyfile mybusco = False for i in", "job_ID + \"/prepared_libraries.txt\" path_tmp_jobid = os.path.join(home, \"tmp\", job_ID) if not os.path.exists(os.path.join(home, \"tmp\")): os.mkdir(os.path.join(home,", "this tag is active, BUSCO analysis will be ommited.') parser.add_argument('-P', '--no_plot', action='store_true', default=False,", "alphanumeric characters.') parser.add_argument('-m', '--memory_limit', default=False, help='Memory limit for all the programs set in", "backstring = '' for i in open(prepared_libs): chunk = i.split() if chunk[5] ==", "is 1000 (1Kb)') parser.add_argument('-x', '--max_scaf2plot', default=20, help=\"Maximum number of scaffolds to plot for", "bam, mpileup = '', '', '' for i in no_varcall: if i[-4:] ==", "it will try to use all memory available.') parser.add_argument('-M', '--memory_fraction', default=1, help='Proportion of", "report import report, ploidy_veredict katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") df = allplots(int(args.window_size), true_output+name+\".raw.vcf\", reduced_assembly,", "help=\"If this tag is active, the program will skip the variant calling step.", "all available memory (default=1), but it may be useful to reduce the percent", "= n_nodes = psutil.cpu_count() if args.nodes and int(args.nodes) < total_nodes: n_nodes = int(args.nodes)", "ploidy_veredict katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") df = allplots(int(args.window_size), true_output+name+\".raw.vcf\", reduced_assembly, true_output+name+\".sorted.bam\", true_output+name+\".mpileup\", os.path.abspath(champion[-1]),", "\"\" if args.no_busco == False: for i in config_dict['BUSCO'][1:]: busco_options = busco_options +", "(\"bash \"+true_output+name+\"_karyon.job\") counter = int(args.max_scaf2plot) if args.no_busco != True: from shutil import copyfile", "= True elif args.genome_assembler == \"soapdenovo\" or args.genome_assembler == \"SOAPdenovo\": from soap_recipee import", "of nodes set with respect to total existing nodes.') args = parser.parse_args() def", "true_output = os.path.abspath(args.output_directory) if true_output[-1] != \"/\": true_output=true_output+\"/\" print(\"wololo\", true_output) ###Sets RAM usage", "'--try_again', default=False, action='store_true', help='Use previous karyon results and skips already computed steps.') parser.add_argument('-K',", "elif args.no_reduction == False and switch == True: reduced_assembly = assembly else: reduced_assembly", "= {} prev = 0 for line in open(config): if line[0] == \"#\":", "args.no_reduction == True: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+ \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]", "downstream analyses require this and won't be possible if you skip it.\") parser.add_argument('-B',", "print (\"... 
removed tmp files!\") if __name__ == '__main__': t0 = datetime.now() try:", "= home + \"/\" prepared_libs = home + \"tmp/\" + job_ID + \"/prepared_libraries.txt\"", "\"Platanus\": if args.no_reduction == True: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+ \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t", "(1Kb)') parser.add_argument('-x', '--max_scaf2plot', default=20, help=\"Maximum number of scaffolds to plot for scaffold-specific plots.", "\"+str(name)) print ('###############') config_dict = parse_config(config_path) home = config_dict[\"karyon\"][0] if home[-1] != \"/\":", "= '' for i in args.libraries: libs = libs + \" \" +", "False: karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i \" + no_red_assembly + \" -o \"", "args.favourite == False: for element in parse_dict: if int(parse_dict[element][2]) > champion[0]: champion =", "= assembly busco_options = \"\" if args.no_busco == False: for i in config_dict['BUSCO'][1:]:", "not args.configuration: selfpath = os.path.dirname(os.path.realpath(sys.argv[0])) config_path = selfpath[:selfpath.rfind('/')] config_path = selfpath[:selfpath.rfind('/')]+\"/configuration.txt\" true_output =", "import random from spades_recipee import call_SPAdes from prepare_libraries import preparation from trimming_libraries import", "BUSCO analysis will be ommited.') parser.add_argument('-P', '--no_plot', action='store_true', default=False, help=\"If this tag is", "tag is active it will skip the assembly step. It requires a reference", "parameters of all the programs.### def parse_config(config): config_dict = {} prev = 0", "user defined, it produces a random 6 character string. If prefix name is", "this tag is active, the program will not remove all intermediary files in", "true_output+name+\"_no_reduc.busco\") noredubusco = true_output+name+\"_no_reduc.busco\" def parse_no_varcall(no_varcall): vcf, bam, mpileup = '', '', ''", "== True: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+ \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1] +", "for some of the analyses. Default is 1000 (1Kb)') parser.add_argument('-x', '--max_scaf2plot', default=20, help=\"Maximum", "i in open(fastq): chunk = i.split() if chunk[5] == \"2\": continue else: parse_dict[chunk[0]]", "= parser.parse_args() def id_generator(size=6, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for _ in range(size))", "\"\": continue config_dict[prev][2] = line[1:-1] + \" \" return config_dict ###Selects the main", "+ '/_keep_existing_', 'w') existence.close() print (\"Now I'm cleaning tmp...\") if args.keep_tmp == True:", "tag is active, the program will skip the variant calling step. Many downstream", "args.no_plot) df2 = ploidy_veredict(df, true_output, name, args.window_size) report(true_output, name, df2, args.no_reduction, no_red_assembly, args.window_size,", "finished') parser.add_argument('-i', '--job_id', default=False, help='Identifier of the intermediate files generated by the different", "If set a number higher than total, it will use total. If set", "the reduction step of redundans. 
Remember that the step is used to perform", "vcf = i+\".vcf\" if os.path.isfile(i+\"raw.vcf\") == True: vcf = i+\"raw.vcf\" return vcf, bam,", "(\"Job ID: \"+str(job_ID)) print (\"Job name: \"+str(name)) print ('###############') config_dict = parse_config(config_path) home", "== True: mpileup = i+\".pileup\" if i[-4:] == \".vcf\": vcf = i if", "variant calling protocol.') parser.add_argument('-c', '--configuration', default=False, help=\"Configuration file. By default will use ./configuration.txt", "if config_dict[prev][2] != \"\": continue config_dict[prev][2] = line[1:-1] + \" \" return config_dict", "existence.close() print (\"Now I'm cleaning tmp...\") if args.keep_tmp == True: print (\"...but keeping", "true_output+name+\"_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary*.txt \" + true_output+name+\".busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\") if args.no_reduction", "+ job_ID + \"/prepared_libraries.txt\" path_tmp_jobid = os.path.join(home, \"tmp\", job_ID) if not os.path.exists(os.path.join(home, \"tmp\")):", "\" \" libstring = libstring + backstring champion = select_champion(prepared_libs) print ('###############') print", "+ name + \"_no_reduc \" + true_output+name+\"_no_reduc_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary.specific.*.txt \" +", "args.keep_tmp == True: existence = open(home + \"tmp/\" + job_ID + '/_keep_existing_', 'w')", "\"tmp\")) prepared_libs = os.path.join(path_tmp_jobid, \"prepared_libraries.txt\") ###Checks that the output is not a file.", "Unsuitable libraries for any of the steps will be ignored. Required.\") parser.add_argument('-F', '--favourite',", "katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") df = allplots(int(args.window_size), true_output+name+\".raw.vcf\", reduced_assembly, true_output+name+\".sorted.bam\", true_output+name+\".mpileup\", os.path.abspath(champion[-1]), config_dict['nQuire'][0],", "It requires a reference assembly.') parser.add_argument('-R', '--no_reduction', action='store_true', default=False, help='If this tag is", "sys, os, re import argparse import psutil import pysam import pandas as pd", "is active, the program will skip the variant calling step. Many downstream analyses", "checks their parameters for downstream analyses. Also performs trimming.### print ('###############') print ('Preparing", "name for all the output files. If omitted, it will generate a random", "parser.add_argument('-S', '--scafmaxsize', default=False, help=\"Will ignore scaffolds with length above the given threshold\") parser.add_argument('-a',", "df2 = ploidy_veredict(df, true_output, name, args.window_size) report(true_output, name, df2, args.no_reduction, no_red_assembly, args.window_size, mybusco,", "tag is active, the program will not remove all intermediary files in the", "help='Prefix name for all the output files. If omitted, it will generate a", "+ name + \" \" + true_output+name+\"_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary*.txt \" +", "choices=['dipspades','dipSPAdes','spades', 'SPAdes','platanus','Platanus', 'soapdenovo', 'SOAPdenovo'], help=\"Genome assembly software to use. 
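    # For reference, the redundans lines written to the job file above look like
    # this (paths are placeholders; <extra> comes from config_dict["redundans"][1]):
    #
    #   python2 /path/to/redundans.py -f <assembly> -o <out>redundans_output \
    #       -i <lib_1.fq lib_2.fq ...> -t <n_nodes> <extra>
    #
    # The Platanus branch writes the same command without the -f argument, and
    # with "--noreduction" appended when --no_reduction is set.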
    busco_options = ""
    if args.no_busco == False:
        for i in config_dict['BUSCO'][1:]:
            busco_options = busco_options + " " + i[:-1]
        karyonjobfile.write("\n")
        karyonjobfile.write(config_dict['BUSCO'][0] + "busco " + "-i " + reduced_assembly + " -o " + name + busco_options + "\n")
        karyonjobfile.write("mv " + name + " " + true_output + name + "_busco\n")
        #karyonjobfile.write("cp " + true_output + name + "_busco/short_summary*.txt " + true_output + name + ".busco\n")
        karyonjobfile.write("rm -r busco_downloads\n")
        if args.no_reduction == False:
            karyonjobfile.write("\n")
            karyonjobfile.write(config_dict['BUSCO'][0] + "busco " + "-i " + no_red_assembly + " -o " + name + "_no_reduc" + busco_options + "\n")
            karyonjobfile.write("mv " + name + "_no_reduc " + true_output + name + "_no_reduc_busco\n")
            #karyonjobfile.write("cp " + true_output + name + "_busco/short_summary.specific.*.txt " + true_output + name + "_no_reduc.busco\n")
            karyonjobfile.write("rm -r busco_downloads\n")
    karyonjobfile.close()

    #5) Create job file that calls all the programs
    if args.no_varcall == False:
        var_call(prepared_libs, config_dict, true_output, name, args.favourite, home, str(ram_limit), str(n_nodes), reduced_assembly, no_red_assembly, args.no_reduction)
    os.system("bash " + true_output + name + "_karyon.job")
    counter = int(args.max_scaf2plot)

    mybusco = noredubusco = False  # ensure both names exist for report() below even when the BUSCO or reduction steps are skipped
    if args.no_busco != True:
        from shutil import copyfile
        mybusco = False
        for i in os.listdir(true_output + name + "_busco"):
            if i.find("specific") > -1:
                mybusco = i
                break
            if i.find('short_summary') > -1 and mybusco != False:
                if i.find("specific") == -1:
                    mybusco = i
        if mybusco != False:
            copyfile(true_output + name + "_busco/" + mybusco, true_output + name + ".busco")
            mybusco = true_output + name + ".busco"
        if args.no_reduction != True:
            noredubusco = False
            for e in os.listdir(true_output + name + "_no_reduc_busco"):
                if e.find("specific") > -1:
                    noredubusco = e
                    break
                if e.find('short_summary') > -1 and noredubusco != False:
                    if e.find("specific") == -1:
                        noredubusco = e
            if noredubusco != False:
                copyfile(true_output + name + "_no_reduc_busco/" + noredubusco, true_output + name + "_no_reduc.busco")
                noredubusco = true_output + name + "_no_reduc.busco"
\\n\") dt = datetime.now()-t0 sys.stderr.write(\"#Time elapsed: %s\\n\" % dt)", "libstring = '' backstring = '' for i in open(prepared_libs): chunk = i.split()", "noredubusco = e break if e.find('short_summary') > -1 and noredubusco != False: if", "\"tmp/\"+e): continue else: os.remove(home + \"tmp/\"+e+\"/\"+i) if '_keep_existing_' in os.listdir(home + \"tmp/\"+e): continue", "i+\".pileup\" if i[-4:] == \".vcf\": vcf = i if os.path.isfile(i+\".vcf\") == True: vcf", "skips already computed steps.') parser.add_argument('-K', '--keep_tmp', action='store_true', default=False, help='If this tag is active,", "###Defines the location of configuration.txt if setting by default### config_path = args.configuration if", "\"+prepared_libs+\" -n \"+name+\" -o \"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\") os.system(\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n", "= selfpath[:selfpath.rfind('/')] config_path = selfpath[:selfpath.rfind('/')]+\"/configuration.txt\" true_output = os.path.abspath(args.output_directory) if true_output[-1] != \"/\": true_output=true_output+\"/\"", "a file. If it does not exist, it creates it.### if not os.path.isdir(args.output_directory):", "true_output+\"redundans_output/scaffolds.filled.fa\" elif args.no_reduction == False and switch == True: reduced_assembly = assembly else:", "= i+\".bam\" if os.path.isfile(i+\".sorted.bam\") == True: bam = i+\".sorted.bam\" if i.find(\"pileup\") > -1:", "psutil.cpu_count() if args.nodes and int(args.nodes) < total_nodes: n_nodes = int(args.nodes) if not args.memory_limit:", "parser.add_argument('-l', '--libraries', required=True, nargs='+', help=\"Fastq libraries to use for assembly and variant calling.", "'--no_plot', action='store_true', default=False, help=\"If this tag is active, the program will omit the", "plots at the end of the the variant calling step.\") parser.add_argument('-w', '--window_size', default=1000,", "soap_recipee soap_recipee(prepared_libs, name, true_output+\"soapdenovo/\", config_dict['SOAPdeNovo'][1], karyonjobfile, config_dict['SOAPdeNovo'][0]) print (\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n", "False: if e.find(\"specific\") == -1: noredubusco = e if noredubusco != False: copyfile(true_output+name+\"_no_reduc_busco/\"+noredubusco,", "('###############') print ('Params') print ('###############') print (args.window_size) print (true_output+name+\".raw.vcf\") print (true_output+\"redundans_output/scaffolds.filled.fa\") print (true_output+name+\".sorted.bam\")", "os.path.isfile(i+\".mpileup\") == True: mpileup = i+\".mpileup\" if os.path.isfile(i+\".pileup\") == True: mpileup = i+\".pileup\"", "== False: var_call(prepared_libs, config_dict, true_output, name, args.favourite, home, str(ram_limit), str(n_nodes), reduced_assembly, no_red_assembly, args.no_reduction)", "if int(parse_dict[element][2]) > champion[0]: champion = [int(parse_dict[element][2]), element] else: champion = [0,args.favourite] return", "after it has finished') parser.add_argument('-i', '--job_id', default=False, help='Identifier of the intermediate files generated", "#5) Create job file that calls all the programs if args.no_varcall == False:", "= i if os.path.isfile(i+\".bam\") == True: bam = i+\".bam\" if os.path.isfile(i+\".sorted.bam\") == True:", "many downstream analyses. 
If you skip it, the analyses may not make much", "'', '' for i in no_varcall: if i[-4:] == \".bam\": bam = i", "on the fraction of nodes set with respect to total existing nodes.') args", "nodes to use. If set a number higher than total, it will use", "is not defined, it uses job ID### job_ID = args.job_id if args.job_id else", "bam = i if os.path.isfile(i+\".bam\") == True: bam = i+\".bam\" if os.path.isfile(i+\".sorted.bam\") ==", "ram_limit = args.memory_limit * int(args.memory_fraction) counter = int(args.max_scaf2plot) ###Sets the job ID and", "name for the job. If job ID is not user defined, it produces", "me...\") for e in os.listdir(home + \"tmp/\"): for i in os.listdir(home + \"tmp/\"+e):", "libs = libs + \" \" + os.path.abspath(i) preparation(libs.split(), 10000, prepared_libs) libs_parsed =", "= libs_parsed + \" \" + true_output + i preparation(libs_parsed.split(), 10000, prepared_libs) ###Parsing", "def parse_config(config): config_dict = {} prev = 0 for line in open(config): if", "== False: for i in config_dict['BUSCO'][1:]: busco_options = busco_options + \" \" +", "os.listdir(args.output_directory): if i.find(\"parsed_\") > -1: libs_parsed = libs_parsed + \" \" + true_output", "-r busco_downloads\\n\") karyonjobfile.close() #5) Create job file that calls all the programs if", "elif args.genome_assembler == \"soapdenovo\" or args.genome_assembler == \"SOAPdenovo\": from soap_recipee import soap_recipee soap_recipee(prepared_libs,", "main() except KeyboardInterrupt: sys.stderr.write(\"\\n Ctrl-C pressed! \\n\") dt = datetime.now()-t0 sys.stderr.write(\"#Time elapsed: %s\\n\"", "preparation(libs_parsed.split(), 10000, prepared_libs) ###Parsing library names, including putting absolute paths # libstring =", "champion = [int(parse_dict[element][2]), element] else: champion = [0,args.favourite] return champion def exit_program(message): sys.stderr.write(\"\\n%s\\n\\n\"%message)", "tmp after it has finished') parser.add_argument('-i', '--job_id', default=False, help='Identifier of the intermediate files", "raise an exception an exit the program exit_program(message) else: os.mkdir(args.output_directory) elif args.try_again ==", "\"+ \"-j \"+true_output+name+\"_karyon.job\") assembly = true_output+\"soapdenovo/\"+name+\".scafSeq\" else: pass else: no_red_assembly = args.no_assembly assembly", "Default is 20.\") parser.add_argument('-s', '--scafminsize', default=False, help=\"Will ignore scaffolds with length below the", "+ \"tmp/\"+job_ID+\"/\", true_output+\"Report/\", counter, job_ID, name, args.scafminsize, args.scafmaxsize, args.no_plot) else: from karyonplots import", "datetime.now() try: main() except KeyboardInterrupt: sys.stderr.write(\"\\n Ctrl-C pressed! \\n\") dt = datetime.now()-t0 sys.stderr.write(\"#Time", "use for assembly and variant calling. Unsuitable libraries for any of the steps", "action='store_true', default=False, help='If this tag is active, the program will not remove all", "not user defined, it produces a random 6 character string. If prefix name", "= False for e in os.listdir(true_output+name+\"_no_reduc_busco\"): if e.find(\"specific\") > -1: noredubusco = e", "False: for element in parse_dict: if int(parse_dict[element][2]) > champion[0]: champion = [int(parse_dict[element][2]), element]", "args.no_varcall == False: from karyonplots import katplot, allplots from report import report, ploidy_veredict", "main(): ###Defines the location of configuration.txt if setting by default### config_path = args.configuration", "desc=\"\"\"Karyon pipeline. 
More info at: https://github.com/Gabaldonlab/karyon \"\"\" epilog=\"\"\"Author: <NAME> (<EMAIL>) Worcester MA, 04/Nov/2021\"\"\"", "= i break if i.find('short_summary') > -1 and mybusco != False: if i.find(\"specific\")", "help=\"Genome assembly software to use. By default it will use dipSPAdes. Options are:", "continue else: parse_dict[chunk[0]] = chunk[1:] champion=[0,''] if args.favourite == False: for element in", "and mybusco != False: if i.find(\"specific\") == -1: mybusco = i if mybusco", "from karyonplots import katplot, allplots from report import report, ploidy_veredict katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0],", "Worcester MA, 04/Nov/2021\"\"\" import sys, os, re import argparse import psutil import pysam", "print (\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n \"+name+\" -o \"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\") os.system(\"python3", "-1: noredubusco = e if noredubusco != False: copyfile(true_output+name+\"_no_reduc_busco/\"+noredubusco, true_output+name+\"_no_reduc.busco\") noredubusco = true_output+name+\"_no_reduc.busco\"", "length above the given threshold\") parser.add_argument('-a', '--try_again', default=False, action='store_true', help='Use previous karyon results", "ram_limit, n_nodes) assembly = true_output+\"dipspades/consensus_contigs.fasta\" no_red_assembly = true_output+\"dipspades/consensus_contigs.fasta\" elif args.genome_assembler == \"spades\" or", "\"tmp/\"+job_ID+\"/\") print (true_output) print (counter) print ('###############') ###Calling spades_recipee.py to generate the assembly", "###Parses the config file in order to check the parameters of all the", "parse_no_varcall(args.no_varcall) df = allplots(int(args.window_size), vcf, reduced_assembly, bam, mpileup, os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home +", "available memory (default=1), but it may be useful to reduce the percent to", "libstring = libstring + backstring champion = select_champion(prepared_libs) print ('###############') print ('Params') print", "analyses. If you skip it, the analyses may not make much sense.') parser.add_argument('-V',", "redundans. Remember that the step is used to perform many downstream analyses. If", "program to use### karyonjobfile = open(true_output+name+\"_karyon.job\", 'a') karyonjobfile.write(\"\\n\") switch = False if args.no_assembly", "to generate the assembly job. In the future it should use config file", "= open(home + \"tmp/\" + job_ID + '/_keep_existing_', 'w') existence.close() print (\"Now I'm", "if args.keep_tmp == True: print (\"...but keeping what you told me...\") for e", "in os.listdir(true_output+name+\"_busco\"): if i.find(\"specific\") > -1: mybusco = i break if i.find('short_summary') >", "string will be the same as the identifier for intermediate files.') parser.add_argument('-l', '--libraries',", "<NAME> (<EMAIL>) Worcester MA, 04/Nov/2021\"\"\" import sys, os, re import argparse import psutil", "('Config. 
path: '+str(config_path)) print (\"RAM Limit: \"+str(ram_limit)+\"Gb\") print (\"Nodes: \"+str(n_nodes)) print (\"Job ID:", "does not exist, it creates it.### if not os.path.isdir(args.output_directory): if os.path.isfile == True:", "largest library for performing the variant calling protocol.') parser.add_argument('-c', '--configuration', default=False, help=\"Configuration file.", "setting by default### config_path = args.configuration if not args.configuration: selfpath = os.path.dirname(os.path.realpath(sys.argv[0])) config_path", "busco_options + \"\\n\") karyonjobfile.write(\"mv \" + name + \"_no_reduc \" + true_output+name+\"_no_reduc_busco\\n\") #karyonjobfile.write(\"cp", "= i if os.path.isfile(i+\".mpileup\") == True: mpileup = i+\".mpileup\" if os.path.isfile(i+\".pileup\") == True:", "of a string of 6 random alphanumeric characters.') parser.add_argument('-m', '--memory_limit', default=False, help='Memory limit", "ignore scaffolds with length below the given threshold\") parser.add_argument('-S', '--scafmaxsize', default=False, help=\"Will ignore", "will skip the variant calling step. Many downstream analyses require this and won't", "\"+str(n_nodes)) print (\"Job ID: \"+str(job_ID)) print (\"Job name: \"+str(name)) print ('###############') config_dict =", "some of the analyses. Default is 1000 (1Kb)') parser.add_argument('-x', '--max_scaf2plot', default=20, help=\"Maximum number", "<reponame>Gabaldonlab/karyon desc=\"\"\"Karyon pipeline. More info at: https://github.com/Gabaldonlab/karyon \"\"\" epilog=\"\"\"Author: <NAME> (<EMAIL>) Worcester MA,", "job_ID print ('###############') print ('Config. path: '+str(config_path)) print (\"RAM Limit: \"+str(ram_limit)+\"Gb\") print (\"Nodes:", "mybusco != False: if i.find(\"specific\") == -1: mybusco = i if mybusco !=", "True: mpileup = i+\".mpileup\" if os.path.isfile(i+\".pileup\") == True: mpileup = i+\".pileup\" if i[-4:]", "a name consisting of a string of 6 random alphanumeric characters.') parser.add_argument('-m', '--memory_limit',", "if i.find(\"specific\") > -1: mybusco = i break if i.find('short_summary') > -1 and", "'_keep_existing_' in os.listdir(home + \"tmp/\"+e): continue else: os.remove(home + \"tmp/\"+e+\"/\"+i) if '_keep_existing_' in", "for i in open(fastq): chunk = i.split() if chunk[5] == \"2\": continue else:", "as the identifier for intermediate files.') parser.add_argument('-l', '--libraries', required=True, nargs='+', help=\"Fastq libraries to", "= int(args.max_scaf2plot) ###Sets the job ID and the prefix name for the job.", "trimming.### print ('###############') print ('Preparing libraries') print ('###############') libs = '' for i", "true_output[-1] != \"/\": true_output=true_output+\"/\" print(\"wololo\", true_output) ###Sets RAM usage options### total_nodes = n_nodes", "use### karyonjobfile = open(true_output+name+\"_karyon.job\", 'a') karyonjobfile.write(\"\\n\") switch = False if args.no_assembly == False:", "!= False: if e.find(\"specific\") == -1: noredubusco = e if noredubusco != False:", "existence = open(home + \"tmp/\" + job_ID + '/_keep_existing_', 'w') existence.close() print (\"Now", "import string import random from spades_recipee import call_SPAdes from prepare_libraries import preparation from", "step. 
Many downstream analyses require this and won't be possible if you skip", "continue else: backstring = backstring + os.path.abspath(chunk[0]) + \" \" libstring = libstring", "config_dict[prev][2] != \"\": continue config_dict[prev][2] = line[1:-1] + \" \" return config_dict ###Selects", "True elif args.genome_assembler == \"soapdenovo\" or args.genome_assembler == \"SOAPdenovo\": from soap_recipee import soap_recipee", "!= \"\": continue config_dict[prev][0] = line[1:-1] elif line[0] == \">\": config_dict[prev][1] = config_dict[prev][1]", "i if os.path.isfile(i+\".vcf\") == True: vcf = i+\".vcf\" if os.path.isfile(i+\"raw.vcf\") == True: vcf", "putting absolute paths # libstring = '' backstring = '' for i in", "= assembly else: reduced_assembly = assembly busco_options = \"\" if args.no_busco == False:", "config file in order to check the parameters of all the programs.### def", "set in Gb. By default it will try to use all memory available.')", "import report, ploidy_veredict katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") df = allplots(int(args.window_size), true_output+name+\".raw.vcf\", reduced_assembly, true_output+name+\".sorted.bam\",", "args.keep_tmp == True: print (\"...but keeping what you told me...\") for e in", "20.\") parser.add_argument('-s', '--scafminsize', default=False, help=\"Will ignore scaffolds with length below the given threshold\")", "config_dict[\"KAT\"][0], true_output+\"Report/\") df = allplots(int(args.window_size), true_output+name+\".raw.vcf\", reduced_assembly, true_output+name+\".sorted.bam\", true_output+name+\".mpileup\", os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home", "+ \" \" libstring = libstring + backstring champion = select_champion(prepared_libs) print ('###############')", "SPAdes, SOAPdenovo or Platanus.\") parser.add_argument('-T', '--no_trimming', action='store_true', default=False, help='If this tag is active,", "ID and the prefix name for the job. If job ID is not", "#karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary.specific.*.txt \" + true_output+name+\"_no_reduc.busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\") karyonjobfile.close() #5) Create", "switch = True elif args.genome_assembler == \"soapdenovo\" or args.genome_assembler == \"SOAPdenovo\": from soap_recipee", "preparation(libs.split(), 10000, prepared_libs) libs_parsed = '' if not args.no_trimming: print ('###############') print ('Trimmomatic')", "the largest library for performing the variant calling protocol.') parser.add_argument('-c', '--configuration', default=False, help=\"Configuration", "int(args.max_scaf2plot) ###Sets the job ID and the prefix name for the job. If", "\" -o \" + name + busco_options + \"\\n\") karyonjobfile.write(\"mv \" + name", "config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output+\"Report/\", counter, job_ID, name, args.scafminsize, args.scafmaxsize, args.no_plot) else: from", "nargs='+', default=False, help=\"If this tag is active, the program will skip the variant", "in config_dict['BUSCO'][1:]: busco_options = busco_options + \" \" + i[:-1] karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \"", "it produces a random 6 character string. 
If prefix name is not defined,", "\" \" + true_output + i preparation(libs_parsed.split(), 10000, prepared_libs) ###Parsing library names, including", "counter = int(args.max_scaf2plot) ###Sets the job ID and the prefix name for the", "args.output_name if args.output_name else job_ID print ('###############') print ('Config. path: '+str(config_path)) print (\"RAM", "\"Path is a file\" #Should raise an exception an exit the program exit_program(message)", "given threshold\") parser.add_argument('-S', '--scafmaxsize', default=False, help=\"Will ignore scaffolds with length above the given", "\"tmp/\"+e): if '_keep_existing_' in os.listdir(home + \"tmp/\"+e): continue else: os.remove(home + \"tmp/\"+e+\"/\"+i) if", "the config file in order to check the parameters of all the programs.###", "= true_output+\"dipspades/consensus_contigs.fasta\" elif args.genome_assembler == \"spades\" or args.genome_assembler == 'SPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output,", "pysam import pandas as pd import string import random from spades_recipee import call_SPAdes", "parser.add_argument('-R', '--no_reduction', action='store_true', default=False, help='If this tag is active, the program will not", "libs_parsed = '' if not args.no_trimming: print ('###############') print ('Trimmomatic') print ('###############') if", "args.genome_assembler == \"dipspades\" or args.genome_assembler == 'dipSPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], False,", "== '': trimmo_commands = '' else: trimmo_commands = \" -c \" + config_dict['trimmomatic'][1]", "e.find('short_summary') > -1 and noredubusco != False: if e.find(\"specific\") == -1: noredubusco =", "False: for i in config_dict['BUSCO'][1:]: busco_options = busco_options + \" \" + i[:-1]", "tag is active, the program will not launch the reduction step of redundans.", "i if mybusco != False: copyfile(true_output+name+\"_busco/\"+mybusco, true_output+name+\".busco\") mybusco = true_output+name+\".busco\" if args.no_reduction !=", "os.path.abspath(args.output_directory) if true_output[-1] != \"/\": true_output=true_output+\"/\" print(\"wololo\", true_output) ###Sets RAM usage options### total_nodes", "is active it will skip the assembly step. It requires a reference assembly.')", "i.split() if chunk[5] == \"1\": libstring = libstring + os.path.abspath(chunk[0]) + \" \"", "omitted, it will generate a random string. This random string will be the", "\"+str(ram_limit)+\"Gb\") print (\"Nodes: \"+str(n_nodes)) print (\"Job ID: \"+str(job_ID)) print (\"Job name: \"+str(name)) print", "the program will skip the trimming step.') parser.add_argument('-A', '--no_assembly', default=False, help='If this tag", "os.path.isfile(i+\"raw.vcf\") == True: vcf = i+\"raw.vcf\" return vcf, bam, mpileup os.mkdir(true_output+\"Report/\") if args.no_varcall", "it will use all available memory (default=1), but it may be useful to", "no_red_assembly = true_output+\"dipspades/consensus_contigs.fasta\" elif args.genome_assembler == \"spades\" or args.genome_assembler == 'SPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0],", "= home + \"tmp/\" + job_ID + \"/prepared_libraries.txt\" path_tmp_jobid = os.path.join(home, \"tmp\", job_ID)", "intermediate files generated by the different programs. 
If false, the program will assign", "results and skips already computed steps.') parser.add_argument('-K', '--keep_tmp', action='store_true', default=False, help='If this tag", "spades_recipee.py to generate the assembly job. In the future it should use config", "ommited.') parser.add_argument('-P', '--no_plot', action='store_true', default=False, help=\"If this tag is active, the program will", "False if args.no_assembly == False: if args.genome_assembler == \"dipspades\" or args.genome_assembler == 'dipSPAdes':", "nodes.') args = parser.parse_args() def id_generator(size=6, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for _", "= line[1:-1] config_dict[prev] = [\"\",\"\",\"\"] elif line[0] == \"@\": if config_dict[prev][0] != \"\":", "home[-1] != \"/\": home = home + \"/\" prepared_libs = home + \"tmp/\"", "\"\"\" epilog=\"\"\"Author: <NAME> (<EMAIL>) Worcester MA, 04/Nov/2021\"\"\" import sys, os, re import argparse", "\"tmp/\" + job_ID + \"/prepared_libraries.txt\" path_tmp_jobid = os.path.join(home, \"tmp\", job_ID) if not os.path.exists(os.path.join(home,", "mpileup = i+\".pileup\" if i[-4:] == \".vcf\": vcf = i if os.path.isfile(i+\".vcf\") ==", "to accelerate the assembly process and improve the results def select_champion(fastq): parse_dict =", "true_output=true_output+\"/\" print(\"wololo\", true_output) ###Sets RAM usage options### total_nodes = n_nodes = psutil.cpu_count() if", "(config_dict['nQuire'][0]) print (config_dict[\"KAT\"][0]) print (home + \"tmp/\"+job_ID+\"/\") print (true_output) print (counter) print ('###############')", "karyonjobfile, config_dict['SOAPdeNovo'][0]) print (\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n \"+name+\" -o \"+true_output+\"soapdenovo \"+ \"-j", "karyonplots import katplot, allplots from report import report, ploidy_veredict katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\")", "e in os.listdir(home + \"tmp/\"): for i in os.listdir(home + \"tmp/\"+e): if '_keep_existing_'", "exit_program(message): sys.stderr.write(\"\\n%s\\n\\n\"%message) sys.exit(1) def main(): ###Defines the location of configuration.txt if setting by", "this tag is active, the program will skip the variant calling step. Many", "'' backstring = '' for i in open(prepared_libs): chunk = i.split() if chunk[5]", "report, ploidy_veredict katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") df = allplots(int(args.window_size), true_output+name+\".raw.vcf\", reduced_assembly, true_output+name+\".sorted.bam\", true_output+name+\".mpileup\",", "not args.no_trimming: print ('###############') print ('Trimmomatic') print ('###############') if config_dict['trimmomatic'][1] == '': trimmo_commands", "the intermediate files generated by the different programs. If false, the program will", "it will generate a random string. This random string will be the same", "use. By default it will use dipSPAdes. Options are: dipSPADEs, SPAdes, SOAPdenovo or", "katplot, allplots katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") vcf, bam, mpileup = parse_no_varcall(args.no_varcall) df =", "line[0] == \">\": config_dict[prev][1] = config_dict[prev][1] + line[1:-1] + \" \" elif line[0]", "(\"... 
removed tmp files!\") if __name__ == '__main__': t0 = datetime.now() try: main()", "config_dict['trimmomatic'][1] trimming(prepared_libs, config_dict[\"trimmomatic\"][0], trimmo_commands, home + \"tmp/\"+job_ID+\"/trimmomatic.job\", true_output, False) os.system(\"bash \" + home", "in os.listdir(home + \"tmp/\"): for i in os.listdir(home + \"tmp/\"+e): if '_keep_existing_' in", "to use by all programs. By default it will use all available memory", "= False for i in os.listdir(true_output+name+\"_busco\"): if i.find(\"specific\") > -1: mybusco = i", "step.\") parser.add_argument('-w', '--window_size', default=1000, help='Window size used for some of the analyses. Default", "told me...\") for e in os.listdir(home + \"tmp/\"): for i in os.listdir(home +", "will use total. If set a number lower than total, it will calculate", "(\"... tmp files havee been kept\") else: print (\"... removed tmp files!\") if", "all the output files will be generated. Required.') parser.add_argument('-o', '--output_name', default=False, help='Prefix name", "is a file\" #Should raise an exception an exit the program exit_program(message) else:", "programs if args.no_varcall == False: var_call(prepared_libs, config_dict, true_output, name, args.favourite, home, str(ram_limit), str(n_nodes),", "i+\".vcf\" if os.path.isfile(i+\"raw.vcf\") == True: vcf = i+\"raw.vcf\" return vcf, bam, mpileup os.mkdir(true_output+\"Report/\")", "try to use all memory available.') parser.add_argument('-M', '--memory_fraction', default=1, help='Proportion of total memory", "= datetime.now() try: main() except KeyboardInterrupt: sys.stderr.write(\"\\n Ctrl-C pressed! \\n\") dt = datetime.now()-t0", "--noreduction\") no_red_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" else: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+ \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t", "remove all intermediary files in the folder tmp after it has finished') parser.add_argument('-i',", "else: os.rmdir(home + \"tmp/\"+e) if args.keep_tmp == True: print (\"... tmp files havee", "'--no_assembly', default=False, help='If this tag is active it will skip the assembly step.", "existing nodes.') args = parser.parse_args() def id_generator(size=6, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for", "\"2\": continue else: backstring = backstring + os.path.abspath(chunk[0]) + \" \" libstring =", "it does not exist, it creates it.### if not os.path.isdir(args.output_directory): if os.path.isfile ==", "has finished') parser.add_argument('-i', '--job_id', default=False, help='Identifier of the intermediate files generated by the", "random alphanumeric characters.') parser.add_argument('-m', '--memory_limit', default=False, help='Memory limit for all the programs set", "than total, it will use total. If set a number lower than total,", "if e.find(\"specific\") == -1: noredubusco = e if noredubusco != False: copyfile(true_output+name+\"_no_reduc_busco/\"+noredubusco, true_output+name+\"_no_reduc.busco\")", "where all the output files will be generated. Required.') parser.add_argument('-o', '--output_name', default=False, help='Prefix", "the libraries and checks their parameters for downstream analyses. 
Also performs trimming.### print", "use all memory available.') parser.add_argument('-M', '--memory_fraction', default=1, help='Proportion of total memory to use", "will select the largest library for performing the variant calling protocol.') parser.add_argument('-c', '--configuration',", "switch == True: reduced_assembly = assembly else: reduced_assembly = assembly busco_options = \"\"", "[\"\",\"\",\"\"] elif line[0] == \"@\": if config_dict[prev][0] != \"\": continue config_dict[prev][0] = line[1:-1]", "set a number higher than total, it will use total. If set a", "step of redundans. Remember that the step is used to perform many downstream", "try: main() except KeyboardInterrupt: sys.stderr.write(\"\\n Ctrl-C pressed! \\n\") dt = datetime.now()-t0 sys.stderr.write(\"#Time elapsed:", "for i in args.libraries: libs = libs + \" \" + os.path.abspath(i) preparation(libs.split(),", "or args.genome_assembler == 'SPAdes': call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], True, ram_limit, n_nodes) assembly", "the analyses. Default is 1000 (1Kb)') parser.add_argument('-x', '--max_scaf2plot', default=20, help=\"Maximum number of scaffolds", "+ busco_options + \"\\n\") karyonjobfile.write(\"mv \" + name + \"_no_reduc \" + true_output+name+\"_no_reduc_busco\\n\")", "for performing the variant calling protocol.') parser.add_argument('-c', '--configuration', default=False, help=\"Configuration file. By default", "parser.add_argument('-a', '--try_again', default=False, action='store_true', help='Use previous karyon results and skips already computed steps.')", "size used for some of the analyses. Default is 1000 (1Kb)') parser.add_argument('-x', '--max_scaf2plot',", "home + \"tmp/\"+job_ID+\"/trimmomatic.job\", true_output, False) os.system(\"bash \" + home + \"tmp/\"+job_ID+\"/trimmomatic.job\") for i", "* float(args.memory_fraction)) if n_nodes < total_nodes: ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction) * (float(n_nodes)/total_nodes))", "the job ID and the prefix name for the job. If job ID", "< total_nodes: ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction) * (float(n_nodes)/total_nodes)) else: ram_limit = args.memory_limit", "formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-d', '--output_directory', required=True, help='Directory where all the output files will be generated.", "of the intermediate files generated by the different programs. If false, the program", "default=False, help='Number of computation nodes to use. If set a number higher than", "(\"Job name: \"+str(name)) print ('###############') config_dict = parse_config(config_path) home = config_dict[\"karyon\"][0] if home[-1]", "I'm cleaning tmp...\") if args.keep_tmp == True: print (\"...but keeping what you told", "element] else: champion = [0,args.favourite] return champion def exit_program(message): sys.stderr.write(\"\\n%s\\n\\n\"%message) sys.exit(1) def main():", "else: trimmo_commands = \" -c \" + config_dict['trimmomatic'][1] trimming(prepared_libs, config_dict[\"trimmomatic\"][0], trimmo_commands, home +", "the program will omit the plots at the end of the the variant", "_ in range(size)) ###Parses the config file in order to check the parameters", "it will use dipSPAdes. Options are: dipSPADEs, SPAdes, SOAPdenovo or Platanus.\") parser.add_argument('-T', '--no_trimming',", "== True: bam = i+\".sorted.bam\" if i.find(\"pileup\") > -1: mpileup = i if", "the output is not a file. 
If it does not exist, it creates", "karyonjobfile = open(true_output+name+\"_karyon.job\", 'a') karyonjobfile.write(\"\\n\") switch = False if args.no_assembly == False: if", "the program exit_program(message) else: os.mkdir(args.output_directory) elif args.try_again == False: os.rmdir(args.output_directory) os.mkdir(args.output_directory) os.mkdir(path_tmp_jobid) from", "config_dict['nQuire'][0], config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output+\"Report/\", counter, job_ID, name, args.scafminsize, args.scafmaxsize, args.no_plot) else:", "print (config_dict[\"KAT\"][0]) print (home + \"tmp/\"+job_ID+\"/\") print (true_output) print (counter) print ('###############') ###Calling", "= line[1:-1] elif line[0] == \">\": config_dict[prev][1] = config_dict[prev][1] + line[1:-1] + \"", "allplots from report import report, ploidy_veredict katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") df = allplots(int(args.window_size),", "string. If prefix name is not defined, it uses job ID### job_ID =", "else: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+ \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) no_red_assembly =", "= argparse.ArgumentParser(description=desc, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-d', '--output_directory', required=True, help='Directory where all the output files", "string. This random string will be the same as the identifier for intermediate", "scaffolds with length below the given threshold\") parser.add_argument('-S', '--scafmaxsize', default=False, help=\"Will ignore scaffolds", "tmp files havee been kept\") else: print (\"... removed tmp files!\") if __name__", "at the end of the the variant calling step.\") parser.add_argument('-w', '--window_size', default=1000, help='Window", "default it will use all available memory (default=1), but it may be useful", "job_ID, name, args.scafminsize, args.scafmaxsize, args.no_plot) else: from karyonplots import katplot, allplots katplot(reduced_assembly, champion[1],", "line[0] == \"#\": continue elif line[0] == \"+\": prev = line[1:-1] config_dict[prev] =", "true_output+name+\".mpileup\", os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0], home + \"tmp/\"+job_ID+\"/\", true_output+\"Report/\", counter, job_ID, name, args.scafminsize, args.scafmaxsize,", "int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction) * (float(n_nodes)/total_nodes)) else: ram_limit = args.memory_limit * int(args.memory_fraction) counter =", "based on the fraction of nodes set with respect to total existing nodes.')", "chunk[5] == \"2\": continue else: backstring = backstring + os.path.abspath(chunk[0]) + \" \"", "default=1000, help='Window size used for some of the analyses. Default is 1000 (1Kb)')", "i.find(\"specific\") > -1: mybusco = i break if i.find('short_summary') > -1 and mybusco", "busco_downloads\\n\") if args.no_reduction == False: karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i \" + no_red_assembly", "('###############') libs = '' for i in args.libraries: libs = libs + \"", "mybusco, noredubusco) df2.to_csv(true_output+\"Report/report\"+name+\".csv\", index=False) ###We clean the tmp directory### if args.keep_tmp == True:", "else job_ID print ('###############') print ('Config. 
path: '+str(config_path)) print (\"RAM Limit: \"+str(ram_limit)+\"Gb\") print", "os.path.abspath(chunk[0]) + \" \" + os.path.abspath(chunk[6]) + \" \" elif chunk[5] == \"2\":", "\"soapdenovo\" or args.genome_assembler == \"SOAPdenovo\": from soap_recipee import soap_recipee soap_recipee(prepared_libs, name, true_output+\"soapdenovo/\", config_dict['SOAPdeNovo'][1],", "this tag is active, the program will omit the plots at the end", "+ \"/prepared_libraries.txt\" path_tmp_jobid = os.path.join(home, \"tmp\", job_ID) if not os.path.exists(os.path.join(home, \"tmp\")): os.mkdir(os.path.join(home, \"tmp\"))", "noredubusco = e if noredubusco != False: copyfile(true_output+name+\"_no_reduc_busco/\"+noredubusco, true_output+name+\"_no_reduc.busco\") noredubusco = true_output+name+\"_no_reduc.busco\" def", "= true_output+name+\"_no_reduc.busco\" def parse_no_varcall(no_varcall): vcf, bam, mpileup = '', '', '' for i", "for line in open(config): if line[0] == \"#\": continue elif line[0] == \"+\":", "(\"Nodes: \"+str(n_nodes)) print (\"Job ID: \"+str(job_ID)) print (\"Job name: \"+str(name)) print ('###############') config_dict", "busco_options + \" \" + i[:-1] karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i \" +", "for _ in range(size)) ###Parses the config file in order to check the", "skip the variant calling step. Many downstream analyses require this and won't be", "libstring + os.path.abspath(chunk[0]) + \" \" + os.path.abspath(chunk[6]) + \" \" elif chunk[5]", "help=\"Maximum number of scaffolds to plot for scaffold-specific plots. Default is 20.\") parser.add_argument('-s',", "use. This is set to accelerate the assembly process and improve the results", "# libstring = '' backstring = '' for i in open(prepared_libs): chunk =", "'--no_reduction', action='store_true', default=False, help='If this tag is active, the program will not launch", "help='If this tag is active, the program will not launch the reduction step", "default=False, help=\"If this tag is active, the program will skip the variant calling", "parser.add_argument('-g', '--genome_assembler', default=\"dipspades\", choices=['dipspades','dipSPAdes','spades', 'SPAdes','platanus','Platanus', 'soapdenovo', 'SOAPdenovo'], help=\"Genome assembly software to use. By", "analysis. Otherwise, karyon will select the largest library for performing the variant calling", "print ('Preparing libraries') print ('###############') libs = '' for i in args.libraries: libs", "+ home + \"tmp/\"+job_ID+\"/trimmomatic.job\") for i in os.listdir(args.output_directory): if i.find(\"parsed_\") > -1: libs_parsed", "the prefix name for the job. If job ID is not user defined,", "true_output, name, config_dict['SPAdes'][1], True, ram_limit, n_nodes) assembly = true_output+\"spades/scaffolds.fasta\" no_red_assembly = true_output+\"spades/scaffolds.fasta\" elif", "calculate memory usage based on the fraction of nodes set with respect to", "help='Window size used for some of the analyses. 
Default is 1000 (1Kb)') parser.add_argument('-x',", "exit the program exit_program(message) else: os.mkdir(args.output_directory) elif args.try_again == False: os.rmdir(args.output_directory) os.mkdir(args.output_directory) os.mkdir(path_tmp_jobid)", "assembly program to use### karyonjobfile = open(true_output+name+\"_karyon.job\", 'a') karyonjobfile.write(\"\\n\") switch = False if", "i preparation(libs_parsed.split(), 10000, prepared_libs) ###Parsing library names, including putting absolute paths # libstring", "print ('###############') ###Calling spades_recipee.py to generate the assembly job. In the future it", "= '' backstring = '' for i in open(prepared_libs): chunk = i.split() if", "return config_dict ###Selects the main library to use. This is set to accelerate", "= true_output+\"dipspades/consensus_contigs.fasta\" no_red_assembly = true_output+\"dipspades/consensus_contigs.fasta\" elif args.genome_assembler == \"spades\" or args.genome_assembler == 'SPAdes':", "args.no_reduction == False and switch == True: reduced_assembly = assembly else: reduced_assembly =", "you told me...\") for e in os.listdir(home + \"tmp/\"): for i in os.listdir(home", "chunk[1:] champion=[0,''] if args.favourite == False: for element in parse_dict: if int(parse_dict[element][2]) >", "+ \" \" elif line[0] == \"?\": if config_dict[prev][2] != \"\": continue config_dict[prev][2]", "\" + true_output+name+\"_no_reduc_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary.specific.*.txt \" + true_output+name+\"_no_reduc.busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\")", "= config_dict[prev][1] + line[1:-1] + \" \" elif line[0] == \"?\": if config_dict[prev][2]", "in os.listdir(true_output+name+\"_no_reduc_busco\"): if e.find(\"specific\") > -1: noredubusco = e break if e.find('short_summary') >", "noredubusco != False: if e.find(\"specific\") == -1: noredubusco = e if noredubusco !=", "continue else: os.remove(home + \"tmp/\"+e+\"/\"+i) if '_keep_existing_' in os.listdir(home + \"tmp/\"+e): continue else:", "tag is active, the program will omit the plots at the end of", "i.find(\"parsed_\") > -1: libs_parsed = libs_parsed + \" \" + true_output + i", "parser.add_argument('-i', '--job_id', default=False, help='Identifier of the intermediate files generated by the different programs.", "-n \"+name+\" -o \"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\") assembly = true_output+\"soapdenovo/\"+name+\".scafSeq\" else: pass else:", "will omit the plots at the end of the the variant calling step.\")", "number higher than total, it will use total. 
If set a number lower", "you skip it.\") parser.add_argument('-B', '--no_busco', default=False, action='store_true', help='If this tag is active, BUSCO", "\" \" + os.path.abspath(chunk[6]) + \" \" elif chunk[5] == \"2\": continue else:", "usage options### total_nodes = n_nodes = psutil.cpu_count() if args.nodes and int(args.nodes) < total_nodes:", "-i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) reduced_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" elif args.no_reduction == False and", "e.find(\"specific\") > -1: noredubusco = e break if e.find('short_summary') > -1 and noredubusco", "with respect to total existing nodes.') args = parser.parse_args() def id_generator(size=6, chars=string.ascii_uppercase +", "n_nodes = int(args.nodes) if not args.memory_limit: ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction)) if n_nodes", "-o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) no_red_assembly = true_output+\"redundans_output/contigs.fa\" assembly = true_output+\"redundans_output/scaffolds.filled.fa\"", "elif line[0] == \"@\": if config_dict[prev][0] != \"\": continue config_dict[prev][0] = line[1:-1] elif", "If it does not exist, it creates it.### if not os.path.isdir(args.output_directory): if os.path.isfile", "parse_config(config_path) home = config_dict[\"karyon\"][0] if home[-1] != \"/\": home = home + \"/\"", "karyonjobfile.write(\"rm -r busco_downloads\\n\") if args.no_reduction == False: karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i \"", "random string. This random string will be the same as the identifier for", "in parse_dict: if int(parse_dict[element][2]) > champion[0]: champion = [int(parse_dict[element][2]), element] else: champion =", "import katplot, allplots katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") vcf, bam, mpileup = parse_no_varcall(args.no_varcall) df", "else: backstring = backstring + os.path.abspath(chunk[0]) + \" \" libstring = libstring +", "counter = int(args.max_scaf2plot) if args.no_busco != True: from shutil import copyfile mybusco =", "the programs if args.no_varcall == False: var_call(prepared_libs, config_dict, true_output, name, args.favourite, home, str(ram_limit),", "will calculate memory usage based on the fraction of nodes set with respect", "parser.add_argument('-m', '--memory_limit', default=False, help='Memory limit for all the programs set in Gb. 
By", "i in os.listdir(args.output_directory): if i.find(\"parsed_\") > -1: libs_parsed = libs_parsed + \" \"", "e if noredubusco != False: copyfile(true_output+name+\"_no_reduc_busco/\"+noredubusco, true_output+name+\"_no_reduc.busco\") noredubusco = true_output+name+\"_no_reduc.busco\" def parse_no_varcall(no_varcall): vcf,", "= libstring + backstring champion = select_champion(prepared_libs) print ('###############') print ('Params') print ('###############')", "= true_output+\"spades/scaffolds.fasta\" elif args.genome_assembler == \"platanus\" or args.genome_assembler == \"Platanus\": if args.no_reduction ==", "name + busco_options + \"\\n\") karyonjobfile.write(\"mv \" + name + \" \" +", "the the variant calling step.\") parser.add_argument('-w', '--window_size', default=1000, help='Window size used for some", "str(ram_limit), str(n_nodes), reduced_assembly, no_red_assembly, args.no_reduction) os.system (\"bash \"+true_output+name+\"_karyon.job\") counter = int(args.max_scaf2plot) if args.no_busco", "to use. If set a number higher than total, it will use total.", "= [0,args.favourite] return champion def exit_program(message): sys.stderr.write(\"\\n%s\\n\\n\"%message) sys.exit(1) def main(): ###Defines the location", "not os.path.isdir(args.output_directory): if os.path.isfile == True: message = \"Path is a file\" #Should", "By default it will try to use all memory available.') parser.add_argument('-M', '--memory_fraction', default=1,", "> -1: noredubusco = e break if e.find('short_summary') > -1 and noredubusco !=", "in open(fastq): chunk = i.split() if chunk[5] == \"2\": continue else: parse_dict[chunk[0]] =", "RAM usage options### total_nodes = n_nodes = psutil.cpu_count() if args.nodes and int(args.nodes) <", "+ \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) reduced_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" elif", "vcf = i+\"raw.vcf\" return vcf, bam, mpileup os.mkdir(true_output+\"Report/\") if args.no_varcall == False: from", "epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-d', '--output_directory', required=True, help='Directory where all the output files will be", "!= \"/\": home = home + \"/\" prepared_libs = home + \"tmp/\" +", "job_ID = args.job_id if args.job_id else id_generator() name = args.output_name if args.output_name else", "if args.job_id else id_generator() name = args.output_name if args.output_name else job_ID print ('###############')", "a number lower than total, it will calculate memory usage based on the", "pandas as pd import string import random from spades_recipee import call_SPAdes from prepare_libraries", "config_path = selfpath[:selfpath.rfind('/')] config_path = selfpath[:selfpath.rfind('/')]+\"/configuration.txt\" true_output = os.path.abspath(args.output_directory) if true_output[-1] != \"/\":", "files.') parser.add_argument('-l', '--libraries', required=True, nargs='+', help=\"Fastq libraries to use for assembly and variant", "if noredubusco != False: copyfile(true_output+name+\"_no_reduc_busco/\"+noredubusco, true_output+name+\"_no_reduc.busco\") noredubusco = true_output+name+\"_no_reduc.busco\" def parse_no_varcall(no_varcall): vcf, bam,", "it uses job ID### job_ID = args.job_id if args.job_id else id_generator() name =", "to reduce the percent to avoid freezing other tasks of the computer during", "parser.add_argument('-F', '--favourite', default=False, help='Sets one library as the prefered one for the variant", 
"calling protocol.') parser.add_argument('-c', '--configuration', default=False, help=\"Configuration file. By default will use ./configuration.txt as", "\"prepared_libraries.txt\") ###Checks that the output is not a file. If it does not", "+ \"\\n\") karyonjobfile.write(\"mv \" + name + \"_no_reduc \" + true_output+name+\"_no_reduc_busco\\n\") #karyonjobfile.write(\"cp \"", "-1 and noredubusco != False: if e.find(\"specific\") == -1: noredubusco = e if", "report(true_output, name, df2, args.no_reduction, no_red_assembly, args.window_size, mybusco, noredubusco) df2.to_csv(true_output+\"Report/report\"+name+\".csv\", index=False) ###We clean the", "tmp files!\") if __name__ == '__main__': t0 = datetime.now() try: main() except KeyboardInterrupt:", "\" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) no_red_assembly = true_output+\"redundans_output/contigs.fa\" assembly =", "mybusco = i break if i.find('short_summary') > -1 and mybusco != False: if", "default=False, help='If this tag is active it will skip the assembly step. It", "parser.parse_args() def id_generator(size=6, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for _ in range(size)) ###Parses", "== \"2\": continue else: parse_dict[chunk[0]] = chunk[1:] champion=[0,''] if args.favourite == False: for", "print ('###############') config_dict = parse_config(config_path) home = config_dict[\"karyon\"][0] if home[-1] != \"/\": home", "or args.genome_assembler == \"SOAPdenovo\": from soap_recipee import soap_recipee soap_recipee(prepared_libs, name, true_output+\"soapdenovo/\", config_dict['SOAPdeNovo'][1], karyonjobfile,", "avoid freezing other tasks of the computer during peaks.') parser.add_argument('-n', '--nodes', default=False, help='Number", "+ os.path.abspath(chunk[0]) + \" \" libstring = libstring + backstring champion = select_champion(prepared_libs)", "else: os.remove(home + \"tmp/\"+e+\"/\"+i) if '_keep_existing_' in os.listdir(home + \"tmp/\"+e): continue else: os.rmdir(home", "else: os.mkdir(args.output_directory) elif args.try_again == False: os.rmdir(args.output_directory) os.mkdir(args.output_directory) os.mkdir(path_tmp_jobid) from karyonplots import katplot,", "= open(true_output+name+\"_karyon.job\", 'a') karyonjobfile.write(\"\\n\") switch = False if args.no_assembly == False: if args.genome_assembler", "config_dict = {} prev = 0 for line in open(config): if line[0] ==", "args.genome_assembler == \"Platanus\": if args.no_reduction == True: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+ \" -o \"+true_output+\"redundans_output -i", "\" + true_output+name+\"_busco/short_summary.specific.*.txt \" + true_output+name+\"_no_reduc.busco\\n\") karyonjobfile.write(\"rm -r busco_downloads\\n\") karyonjobfile.close() #5) Create job", "(default=1), but it may be useful to reduce the percent to avoid freezing", "karyonplots import katplot, allplots katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") vcf, bam, mpileup = parse_no_varcall(args.no_varcall)", "soap_recipee import soap_recipee soap_recipee(prepared_libs, name, true_output+\"soapdenovo/\", config_dict['SOAPdeNovo'][1], karyonjobfile, config_dict['SOAPdeNovo'][0]) print (\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r", "i[-4:] == \".bam\": bam = i if os.path.isfile(i+\".bam\") == True: bam = i+\".bam\"", "e break if e.find('short_summary') > -1 and noredubusco != False: if e.find(\"specific\") 
==", "if e.find('short_summary') > -1 and noredubusco != False: if e.find(\"specific\") == -1: noredubusco", "karyonjobfile.write(\"rm -r busco_downloads\\n\") karyonjobfile.close() #5) Create job file that calls all the programs", "default=20, help=\"Maximum number of scaffolds to plot for scaffold-specific plots. Default is 20.\")", "\"\", config_dict[\"KAT\"][0], \"\") ###Parses the libraries and checks their parameters for downstream analyses.", "future it should use config file to select the assembly program to use###", "from report import report, ploidy_veredict katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") df = allplots(int(args.window_size), true_output+name+\".raw.vcf\",", "True: vcf = i+\".vcf\" if os.path.isfile(i+\"raw.vcf\") == True: vcf = i+\"raw.vcf\" return vcf,", "and checks their parameters for downstream analyses. Also performs trimming.### print ('###############') print", "help='If this tag is active, the program will not remove all intermediary files", "If set a number lower than total, it will calculate memory usage based", "= i.split() if chunk[5] == \"1\": libstring = libstring + os.path.abspath(chunk[0]) + \"", "config_dict['SOAPdeNovo'][0]) print (\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n \"+name+\" -o \"+true_output+\"soapdenovo \"+ \"-j \"+true_output+name+\"_karyon.job\")", "\"tmp/\"+job_ID+\"/trimmomatic.job\", true_output, False) os.system(\"bash \" + home + \"tmp/\"+job_ID+\"/trimmomatic.job\") for i in os.listdir(args.output_directory):", "\".vcf\": vcf = i if os.path.isfile(i+\".vcf\") == True: vcf = i+\".vcf\" if os.path.isfile(i+\"raw.vcf\")", "###Calling spades_recipee.py to generate the assembly job. In the future it should use", "the trimming step.') parser.add_argument('-A', '--no_assembly', default=False, help='If this tag is active it will", "\" + \"-i \" + no_red_assembly + \" -o \" + name+\"_no_reduc\" +", "is active, the program will not remove all intermediary files in the folder", "This is set to accelerate the assembly process and improve the results def", "i[:-1] karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i \" + reduced_assembly + \" -o \"", "the output files. If omitted, it will generate a random string. This random", "\"+config_dict['redundans'][0]+\"redundans.py\"+ \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) no_red_assembly = true_output+\"redundans_output/contigs.fa\" assembly", "\" \" + true_output+name+\"_busco\\n\") #karyonjobfile.write(\"cp \" + true_output+name+\"_busco/short_summary*.txt \" + true_output+name+\".busco\\n\") karyonjobfile.write(\"rm -r", "total, it will calculate memory usage based on the fraction of nodes set", "help='Memory limit for all the programs set in Gb. By default it will", "\" + \"-i \" + reduced_assembly + \" -o \" + name +", "ram_limit, n_nodes) assembly = true_output+\"spades/scaffolds.fasta\" no_red_assembly = true_output+\"spades/scaffolds.fasta\" elif args.genome_assembler == \"platanus\" or", "help='If this tag is active, BUSCO analysis will be ommited.') parser.add_argument('-P', '--no_plot', action='store_true',", "int(args.nodes) if not args.memory_limit: ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction)) if n_nodes < total_nodes:", "default=1, help='Proportion of total memory to use by all programs. By default it", "calling. 
Unsuitable libraries for any of the steps will be ignored. Required.\") parser.add_argument('-F',", "chunk = i.split() if chunk[5] == \"2\": continue else: parse_dict[chunk[0]] = chunk[1:] champion=[0,'']", "os.system(\"bash \" + home + \"tmp/\"+job_ID+\"/trimmomatic.job\") for i in os.listdir(args.output_directory): if i.find(\"parsed_\") >", "\".bam\": bam = i if os.path.isfile(i+\".bam\") == True: bam = i+\".bam\" if os.path.isfile(i+\".sorted.bam\")", "katplot, allplots from report import report, ploidy_veredict katplot(reduced_assembly, champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") df =", "'/_keep_existing_', 'w') existence.close() print (\"Now I'm cleaning tmp...\") if args.keep_tmp == True: print", "select_champion(fastq): parse_dict = {} for i in open(fastq): chunk = i.split() if chunk[5]", "= true_output+\"redundans_output/scaffolds.filled.fa\" else: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+ \" -o \"+true_output+\"redundans_output -i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1])", "if os.path.isfile(i+\"raw.vcf\") == True: vcf = i+\"raw.vcf\" return vcf, bam, mpileup os.mkdir(true_output+\"Report/\") if", "-1: mybusco = i if mybusco != False: copyfile(true_output+name+\"_busco/\"+mybusco, true_output+name+\".busco\") mybusco = true_output+name+\".busco\"", "in no_varcall: if i[-4:] == \".bam\": bam = i if os.path.isfile(i+\".bam\") == True:", "+ \"tmp/\"+job_ID+\"/trimmomatic.job\") for i in os.listdir(args.output_directory): if i.find(\"parsed_\") > -1: libs_parsed = libs_parsed", "+ true_output + i preparation(libs_parsed.split(), 10000, prepared_libs) ###Parsing library names, including putting absolute", "freezing other tasks of the computer during peaks.') parser.add_argument('-n', '--nodes', default=False, help='Number of", "all the programs if args.no_varcall == False: var_call(prepared_libs, config_dict, true_output, name, args.favourite, home,", "action='store_true', default=False, help='If this tag is active, the program will not launch the", "-i \"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1] + \" --noreduction\") no_red_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" else: karyonjobfile.write(\"python2", "continue config_dict[prev][2] = line[1:-1] + \" \" return config_dict ###Selects the main library", "print ('Trimmomatic') print ('###############') if config_dict['trimmomatic'][1] == '': trimmo_commands = '' else: trimmo_commands", "different programs. 
If false, the program will assign a name consisting of a", "not remove all intermediary files in the folder tmp after it has finished')", "args.libraries: libs = libs + \" \" + os.path.abspath(i) preparation(libs.split(), 10000, prepared_libs) libs_parsed", "\"+libstring+\" -t \"+str(n_nodes)+\" \"+config_dict[\"redundans\"][1]) reduced_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" elif args.no_reduction == False and switch", "champion[1], config_dict[\"KAT\"][0], true_output+\"Report/\") df = allplots(int(args.window_size), true_output+name+\".raw.vcf\", reduced_assembly, true_output+name+\".sorted.bam\", true_output+name+\".mpileup\", os.path.abspath(champion[-1]), config_dict['nQuire'][0], config_dict[\"KAT\"][0],", "name, true_output+\"soapdenovo/\", config_dict['SOAPdeNovo'][1], karyonjobfile, config_dict['SOAPdeNovo'][0]) print (\"python3 \"+os.path.dirname(__file__)+\"/soap_recipee.py -r \"+prepared_libs+\" -n \"+name+\" -o", "if args.nodes and int(args.nodes) < total_nodes: n_nodes = int(args.nodes) if not args.memory_limit: ram_limit", "= backstring + os.path.abspath(chunk[0]) + \" \" libstring = libstring + backstring champion", "help=\"Will ignore scaffolds with length above the given threshold\") parser.add_argument('-a', '--try_again', default=False, action='store_true',", "ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction) * (float(n_nodes)/total_nodes)) else: ram_limit = args.memory_limit * int(args.memory_fraction)", "datetime import datetime parser = argparse.ArgumentParser(description=desc, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-d', '--output_directory', required=True, help='Directory where", "\"+str(job_ID)) print (\"Job name: \"+str(name)) print ('###############') config_dict = parse_config(config_path) home = config_dict[\"karyon\"][0]", "to use for assembly and variant calling. Unsuitable libraries for any of the", "but it may be useful to reduce the percent to avoid freezing other", "+ i[:-1] karyonjobfile.write(\"\\n\") karyonjobfile.write(config_dict['BUSCO'][0]+\"busco \" + \"-i \" + reduced_assembly + \" -o", "assembly busco_options = \"\" if args.no_busco == False: for i in config_dict['BUSCO'][1:]: busco_options", "\"+config_dict[\"redundans\"][1] + \" --noreduction\") no_red_assembly = true_output+\"redundans_output/scaffolds.filled.fa\" else: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+ \" -o \"+true_output+\"redundans_output", "elif line[0] == \"?\": if config_dict[prev][2] != \"\": continue config_dict[prev][2] = line[1:-1] +", "all the programs.### def parse_config(config): config_dict = {} prev = 0 for line", "False and switch == False: karyonjobfile.write(\"python2 \"+config_dict['redundans'][0]+\"redundans.py\"+\" -f \"+ assembly + \" -o", "libs = '' for i in args.libraries: libs = libs + \" \"", "results def select_champion(fastq): parse_dict = {} for i in open(fastq): chunk = i.split()", "the end of the the variant calling step.\") parser.add_argument('-w', '--window_size', default=1000, help='Window size", "it may be useful to reduce the percent to avoid freezing other tasks", "parse_dict: if int(parse_dict[element][2]) > champion[0]: champion = [int(parse_dict[element][2]), element] else: champion = [0,args.favourite]", "trimming step.') parser.add_argument('-A', '--no_assembly', default=False, help='If this tag is active it will skip" ]
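The configuration format expected by parse_config() can be pieced together from the fragments: lines starting with "#" are comments, "+name" opens a program section, "@" gives the binary location, ">" lines accumulate an option string and "?" lines hold extra parameters. The following is a minimal sketch of that parser under this reading of the fragments; blank-line and whitespace handling in the original may differ.

    def parse_config(config):
        # '#': comment, '+name': new program section, '@': binary path,
        # '>': option string (appended), '?': extra parameters.
        config_dict = {}
        prev = None
        for line in open(config):
            if line[0] == "#":
                continue
            elif line[0] == "+":
                prev = line[1:-1]
                config_dict[prev] = ["", "", ""]
            elif line[0] == "@":
                if config_dict[prev][0] == "":      # first path wins
                    config_dict[prev][0] = line[1:-1]
            elif line[0] == ">":
                config_dict[prev][1] = config_dict[prev][1] + line[1:-1] + " "
            elif line[0] == "?":
                if config_dict[prev][2] == "":      # first extra-parameter line wins
                    config_dict[prev][2] = line[1:-1] + " "
        return config_dict

With this layout, config_dict['SPAdes'][0] holds the SPAdes binary location and config_dict['SPAdes'][1] its option string, which matches how the driver indexes the dictionary in the fragments above.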
[ "sqlite3 db = sqlite3.connect('data.db') c=db.cursor() c.execute('create table users (id integer primary key, name", "<gh_stars>0 import sqlite3 db = sqlite3.connect('data.db') c=db.cursor() c.execute('create table users (id integer primary", "db = sqlite3.connect('data.db') c=db.cursor() c.execute('create table users (id integer primary key, name text,", "c=db.cursor() c.execute('create table users (id integer primary key, name text, partner text, recieve_date", "= sqlite3.connect('data.db') c=db.cursor() c.execute('create table users (id integer primary key, name text, partner", "c.execute('create table users (id integer primary key, name text, partner text, recieve_date date,", "sqlite3.connect('data.db') c=db.cursor() c.execute('create table users (id integer primary key, name text, partner text,", "table users (id integer primary key, name text, partner text, recieve_date date, partnered_date", "users (id integer primary key, name text, partner text, recieve_date date, partnered_date date)')", "(id integer primary key, name text, partner text, recieve_date date, partnered_date date)') db.commit()", "import sqlite3 db = sqlite3.connect('data.db') c=db.cursor() c.execute('create table users (id integer primary key,", "integer primary key, name text, partner text, recieve_date date, partnered_date date)') db.commit() db.close()" ]
[ "DOXY_VERSION + '.*', DOXY_VERSION + version) # Update CMakeLists.txt replace('CMakeLists.txt', '''SET\\\\(CBOR_VERSION_MAJOR \"0\"\\\\) SET\\\\(CBOR_VERSION_MINOR", "pattern, replacement): updated = re.sub(pattern, replacement, open(file_path).read()) with open(file_path, 'w') as f: f.write(updated)", "# Update changelog SEP = '---------------------' NEXT = f'Next\\n{SEP}' changelog_header = f'{NEXT}\\n\\n{version} ({release_date})\\n{SEP}'", "minor, patch = version.split('.') def replace(file_path, pattern, replacement): updated = re.sub(pattern, replacement, open(file_path).read())", "SET(CBOR_VERSION_MINOR \"{minor}\") SET(CBOR_VERSION_PATCH \"{patch}\")''') # Update Sphinx replace('doc/source/conf.py', \"\"\"version = '.*' release =", "\"7\"\\\\) SET\\\\(CBOR_VERSION_PATCH \"0\"\\\\)''', f'''SET(CBOR_VERSION_MAJOR \"{major}\") SET(CBOR_VERSION_MINOR \"{minor}\") SET(CBOR_VERSION_PATCH \"{patch}\")''') # Update Sphinx replace('doc/source/conf.py',", "import date version = sys.argv[1] release_date = date.today().strftime('%Y-%m-%d') major, minor, patch = version.split('.')", "f'''SET(CBOR_VERSION_MAJOR \"{major}\") SET(CBOR_VERSION_MINOR \"{minor}\") SET(CBOR_VERSION_PATCH \"{patch}\")''') # Update Sphinx replace('doc/source/conf.py', \"\"\"version = '.*'", "re from datetime import date version = sys.argv[1] release_date = date.today().strftime('%Y-%m-%d') major, minor,", "Update Sphinx replace('doc/source/conf.py', \"\"\"version = '.*' release = '.*'\"\"\", f\"\"\"version = '{major}.{minor}' release", "datetime import date version = sys.argv[1] release_date = date.today().strftime('%Y-%m-%d') major, minor, patch =", "import sys, re from datetime import date version = sys.argv[1] release_date = date.today().strftime('%Y-%m-%d')", "= '---------------------' NEXT = f'Next\\n{SEP}' changelog_header = f'{NEXT}\\n\\n{version} ({release_date})\\n{SEP}' replace('CHANGELOG.md', NEXT, changelog_header) #", "\"0\"\\\\)''', f'''SET(CBOR_VERSION_MAJOR \"{major}\") SET(CBOR_VERSION_MINOR \"{minor}\") SET(CBOR_VERSION_PATCH \"{patch}\")''') # Update Sphinx replace('doc/source/conf.py', \"\"\"version =", "open(file_path).read()) with open(file_path, 'w') as f: f.write(updated) # Update changelog SEP = '---------------------'", "as f: f.write(updated) # Update changelog SEP = '---------------------' NEXT = f'Next\\n{SEP}' changelog_header", "replacement): updated = re.sub(pattern, replacement, open(file_path).read()) with open(file_path, 'w') as f: f.write(updated) #", "from datetime import date version = sys.argv[1] release_date = date.today().strftime('%Y-%m-%d') major, minor, patch", "CMakeLists.txt replace('CMakeLists.txt', '''SET\\\\(CBOR_VERSION_MAJOR \"0\"\\\\) SET\\\\(CBOR_VERSION_MINOR \"7\"\\\\) SET\\\\(CBOR_VERSION_PATCH \"0\"\\\\)''', f'''SET(CBOR_VERSION_MAJOR \"{major}\") SET(CBOR_VERSION_MINOR \"{minor}\") SET(CBOR_VERSION_PATCH", "= date.today().strftime('%Y-%m-%d') major, minor, patch = version.split('.') def replace(file_path, pattern, replacement): updated =", "'---------------------' NEXT = f'Next\\n{SEP}' changelog_header = f'{NEXT}\\n\\n{version} ({release_date})\\n{SEP}' replace('CHANGELOG.md', NEXT, changelog_header) # Update", "# Update Doxyfile DOXY_VERSION = 'PROJECT_NUMBER = ' replace('Doxyfile', DOXY_VERSION + '.*', DOXY_VERSION", "with open(file_path, 'w') as f: f.write(updated) # Update changelog SEP = '---------------------' NEXT", "version) # Update CMakeLists.txt replace('CMakeLists.txt', '''SET\\\\(CBOR_VERSION_MAJOR \"0\"\\\\) SET\\\\(CBOR_VERSION_MINOR \"7\"\\\\) 
SET\\\\(CBOR_VERSION_PATCH \"0\"\\\\)''', f'''SET(CBOR_VERSION_MAJOR \"{major}\")", "\"{major}\") SET(CBOR_VERSION_MINOR \"{minor}\") SET(CBOR_VERSION_PATCH \"{patch}\")''') # Update Sphinx replace('doc/source/conf.py', \"\"\"version = '.*' release", "Update changelog SEP = '---------------------' NEXT = f'Next\\n{SEP}' changelog_header = f'{NEXT}\\n\\n{version} ({release_date})\\n{SEP}' replace('CHANGELOG.md',", "replace('CMakeLists.txt', '''SET\\\\(CBOR_VERSION_MAJOR \"0\"\\\\) SET\\\\(CBOR_VERSION_MINOR \"7\"\\\\) SET\\\\(CBOR_VERSION_PATCH \"0\"\\\\)''', f'''SET(CBOR_VERSION_MAJOR \"{major}\") SET(CBOR_VERSION_MINOR \"{minor}\") SET(CBOR_VERSION_PATCH \"{patch}\")''')", "# Update Sphinx replace('doc/source/conf.py', \"\"\"version = '.*' release = '.*'\"\"\", f\"\"\"version = '{major}.{minor}'", "replace(file_path, pattern, replacement): updated = re.sub(pattern, replacement, open(file_path).read()) with open(file_path, 'w') as f:", "\"{minor}\") SET(CBOR_VERSION_PATCH \"{patch}\")''') # Update Sphinx replace('doc/source/conf.py', \"\"\"version = '.*' release = '.*'\"\"\",", "= f'Next\\n{SEP}' changelog_header = f'{NEXT}\\n\\n{version} ({release_date})\\n{SEP}' replace('CHANGELOG.md', NEXT, changelog_header) # Update Doxyfile DOXY_VERSION", "= ' replace('Doxyfile', DOXY_VERSION + '.*', DOXY_VERSION + version) # Update CMakeLists.txt replace('CMakeLists.txt',", "({release_date})\\n{SEP}' replace('CHANGELOG.md', NEXT, changelog_header) # Update Doxyfile DOXY_VERSION = 'PROJECT_NUMBER = ' replace('Doxyfile',", "replace('Doxyfile', DOXY_VERSION + '.*', DOXY_VERSION + version) # Update CMakeLists.txt replace('CMakeLists.txt', '''SET\\\\(CBOR_VERSION_MAJOR \"0\"\\\\)", "version = sys.argv[1] release_date = date.today().strftime('%Y-%m-%d') major, minor, patch = version.split('.') def replace(file_path,", "date.today().strftime('%Y-%m-%d') major, minor, patch = version.split('.') def replace(file_path, pattern, replacement): updated = re.sub(pattern,", "+ '.*', DOXY_VERSION + version) # Update CMakeLists.txt replace('CMakeLists.txt', '''SET\\\\(CBOR_VERSION_MAJOR \"0\"\\\\) SET\\\\(CBOR_VERSION_MINOR \"7\"\\\\)", "+ version) # Update CMakeLists.txt replace('CMakeLists.txt', '''SET\\\\(CBOR_VERSION_MAJOR \"0\"\\\\) SET\\\\(CBOR_VERSION_MINOR \"7\"\\\\) SET\\\\(CBOR_VERSION_PATCH \"0\"\\\\)''', f'''SET(CBOR_VERSION_MAJOR", "# Update CMakeLists.txt replace('CMakeLists.txt', '''SET\\\\(CBOR_VERSION_MAJOR \"0\"\\\\) SET\\\\(CBOR_VERSION_MINOR \"7\"\\\\) SET\\\\(CBOR_VERSION_PATCH \"0\"\\\\)''', f'''SET(CBOR_VERSION_MAJOR \"{major}\") SET(CBOR_VERSION_MINOR", "replacement, open(file_path).read()) with open(file_path, 'w') as f: f.write(updated) # Update changelog SEP =", "Sphinx replace('doc/source/conf.py', \"\"\"version = '.*' release = '.*'\"\"\", f\"\"\"version = '{major}.{minor}' release =", "SET\\\\(CBOR_VERSION_MINOR \"7\"\\\\) SET\\\\(CBOR_VERSION_PATCH \"0\"\\\\)''', f'''SET(CBOR_VERSION_MAJOR \"{major}\") SET(CBOR_VERSION_MINOR \"{minor}\") SET(CBOR_VERSION_PATCH \"{patch}\")''') # Update Sphinx", "= 'PROJECT_NUMBER = ' replace('Doxyfile', DOXY_VERSION + '.*', DOXY_VERSION + version) # Update", "NEXT, changelog_header) # Update Doxyfile DOXY_VERSION = 'PROJECT_NUMBER = ' replace('Doxyfile', DOXY_VERSION +", "changelog_header) # Update Doxyfile DOXY_VERSION = 'PROJECT_NUMBER = ' replace('Doxyfile', DOXY_VERSION + '.*',", "' replace('Doxyfile', DOXY_VERSION + '.*', DOXY_VERSION + version) # Update CMakeLists.txt replace('CMakeLists.txt', '''SET\\\\(CBOR_VERSION_MAJOR", "f'Next\\n{SEP}' 
changelog_header = f'{NEXT}\\n\\n{version} ({release_date})\\n{SEP}' replace('CHANGELOG.md', NEXT, changelog_header) # Update Doxyfile DOXY_VERSION =", "Update CMakeLists.txt replace('CMakeLists.txt', '''SET\\\\(CBOR_VERSION_MAJOR \"0\"\\\\) SET\\\\(CBOR_VERSION_MINOR \"7\"\\\\) SET\\\\(CBOR_VERSION_PATCH \"0\"\\\\)''', f'''SET(CBOR_VERSION_MAJOR \"{major}\") SET(CBOR_VERSION_MINOR \"{minor}\")", "NEXT = f'Next\\n{SEP}' changelog_header = f'{NEXT}\\n\\n{version} ({release_date})\\n{SEP}' replace('CHANGELOG.md', NEXT, changelog_header) # Update Doxyfile", "= version.split('.') def replace(file_path, pattern, replacement): updated = re.sub(pattern, replacement, open(file_path).read()) with open(file_path,", "date version = sys.argv[1] release_date = date.today().strftime('%Y-%m-%d') major, minor, patch = version.split('.') def", "f: f.write(updated) # Update changelog SEP = '---------------------' NEXT = f'Next\\n{SEP}' changelog_header =", "SET(CBOR_VERSION_PATCH \"{patch}\")''') # Update Sphinx replace('doc/source/conf.py', \"\"\"version = '.*' release = '.*'\"\"\", f\"\"\"version", "Update Doxyfile DOXY_VERSION = 'PROJECT_NUMBER = ' replace('Doxyfile', DOXY_VERSION + '.*', DOXY_VERSION +", "patch = version.split('.') def replace(file_path, pattern, replacement): updated = re.sub(pattern, replacement, open(file_path).read()) with", "Doxyfile DOXY_VERSION = 'PROJECT_NUMBER = ' replace('Doxyfile', DOXY_VERSION + '.*', DOXY_VERSION + version)", "SET\\\\(CBOR_VERSION_PATCH \"0\"\\\\)''', f'''SET(CBOR_VERSION_MAJOR \"{major}\") SET(CBOR_VERSION_MINOR \"{minor}\") SET(CBOR_VERSION_PATCH \"{patch}\")''') # Update Sphinx replace('doc/source/conf.py', \"\"\"version", "sys.argv[1] release_date = date.today().strftime('%Y-%m-%d') major, minor, patch = version.split('.') def replace(file_path, pattern, replacement):", "DOXY_VERSION = 'PROJECT_NUMBER = ' replace('Doxyfile', DOXY_VERSION + '.*', DOXY_VERSION + version) #", "def replace(file_path, pattern, replacement): updated = re.sub(pattern, replacement, open(file_path).read()) with open(file_path, 'w') as", "\"0\"\\\\) SET\\\\(CBOR_VERSION_MINOR \"7\"\\\\) SET\\\\(CBOR_VERSION_PATCH \"0\"\\\\)''', f'''SET(CBOR_VERSION_MAJOR \"{major}\") SET(CBOR_VERSION_MINOR \"{minor}\") SET(CBOR_VERSION_PATCH \"{patch}\")''') # Update", "re.sub(pattern, replacement, open(file_path).read()) with open(file_path, 'w') as f: f.write(updated) # Update changelog SEP", "f.write(updated) # Update changelog SEP = '---------------------' NEXT = f'Next\\n{SEP}' changelog_header = f'{NEXT}\\n\\n{version}", "changelog_header = f'{NEXT}\\n\\n{version} ({release_date})\\n{SEP}' replace('CHANGELOG.md', NEXT, changelog_header) # Update Doxyfile DOXY_VERSION = 'PROJECT_NUMBER", "replace('doc/source/conf.py', \"\"\"version = '.*' release = '.*'\"\"\", f\"\"\"version = '{major}.{minor}' release = '{major}.{minor}.{patch}'\"\"\")", "f'{NEXT}\\n\\n{version} ({release_date})\\n{SEP}' replace('CHANGELOG.md', NEXT, changelog_header) # Update Doxyfile DOXY_VERSION = 'PROJECT_NUMBER = '", "= re.sub(pattern, replacement, open(file_path).read()) with open(file_path, 'w') as f: f.write(updated) # Update changelog", "\"{patch}\")''') # Update Sphinx replace('doc/source/conf.py', \"\"\"version = '.*' release = '.*'\"\"\", f\"\"\"version =", "= f'{NEXT}\\n\\n{version} ({release_date})\\n{SEP}' replace('CHANGELOG.md', NEXT, changelog_header) # Update Doxyfile DOXY_VERSION = 'PROJECT_NUMBER =", "DOXY_VERSION + version) # Update CMakeLists.txt replace('CMakeLists.txt', '''SET\\\\(CBOR_VERSION_MAJOR 
\"0\"\\\\) SET\\\\(CBOR_VERSION_MINOR \"7\"\\\\) SET\\\\(CBOR_VERSION_PATCH \"0\"\\\\)''',", "replace('CHANGELOG.md', NEXT, changelog_header) # Update Doxyfile DOXY_VERSION = 'PROJECT_NUMBER = ' replace('Doxyfile', DOXY_VERSION", "updated = re.sub(pattern, replacement, open(file_path).read()) with open(file_path, 'w') as f: f.write(updated) # Update", "'.*', DOXY_VERSION + version) # Update CMakeLists.txt replace('CMakeLists.txt', '''SET\\\\(CBOR_VERSION_MAJOR \"0\"\\\\) SET\\\\(CBOR_VERSION_MINOR \"7\"\\\\) SET\\\\(CBOR_VERSION_PATCH", "open(file_path, 'w') as f: f.write(updated) # Update changelog SEP = '---------------------' NEXT =", "sys, re from datetime import date version = sys.argv[1] release_date = date.today().strftime('%Y-%m-%d') major,", "changelog SEP = '---------------------' NEXT = f'Next\\n{SEP}' changelog_header = f'{NEXT}\\n\\n{version} ({release_date})\\n{SEP}' replace('CHANGELOG.md', NEXT,", "major, minor, patch = version.split('.') def replace(file_path, pattern, replacement): updated = re.sub(pattern, replacement,", "'w') as f: f.write(updated) # Update changelog SEP = '---------------------' NEXT = f'Next\\n{SEP}'", "release_date = date.today().strftime('%Y-%m-%d') major, minor, patch = version.split('.') def replace(file_path, pattern, replacement): updated", "= sys.argv[1] release_date = date.today().strftime('%Y-%m-%d') major, minor, patch = version.split('.') def replace(file_path, pattern,", "'''SET\\\\(CBOR_VERSION_MAJOR \"0\"\\\\) SET\\\\(CBOR_VERSION_MINOR \"7\"\\\\) SET\\\\(CBOR_VERSION_PATCH \"0\"\\\\)''', f'''SET(CBOR_VERSION_MAJOR \"{major}\") SET(CBOR_VERSION_MINOR \"{minor}\") SET(CBOR_VERSION_PATCH \"{patch}\")''') #", "'PROJECT_NUMBER = ' replace('Doxyfile', DOXY_VERSION + '.*', DOXY_VERSION + version) # Update CMakeLists.txt", "version.split('.') def replace(file_path, pattern, replacement): updated = re.sub(pattern, replacement, open(file_path).read()) with open(file_path, 'w')", "SEP = '---------------------' NEXT = f'Next\\n{SEP}' changelog_header = f'{NEXT}\\n\\n{version} ({release_date})\\n{SEP}' replace('CHANGELOG.md', NEXT, changelog_header)" ]
[]
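The version-bump script above does all of its work through the small replace() helper, which rewrites a file in place via re.sub, and would be run with the new version as its single argument, e.g. python release.py 0.8.0 (release.py is an assumed file name). A throwaway sketch of the same rewrite pattern on a temporary file with invented contents:

import os
import re
import tempfile

def replace(file_path, pattern, replacement):
    # Same idea as the helper above: read, regex-substitute, write back in place.
    updated = re.sub(pattern, replacement, open(file_path).read())
    with open(file_path, 'w') as f:
        f.write(updated)

fd, path = tempfile.mkstemp(text=True)
os.close(fd)
with open(path, 'w') as f:
    f.write('PROJECT_NUMBER = 0.7.0\n')
replace(path, 'PROJECT_NUMBER = .*', 'PROJECT_NUMBER = 0.8.0')
print(open(path).read())  # -> PROJECT_NUMBER = 0.8.0
os.remove(path)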
[ "] if m[1] is not None)) class TweakedSlectedKernelOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride,", "for m in [ ('norm1', normalization(in_channels)), ('act1', activation(inplace=True)), ('conv1', nn.Conv2d( in_channels, out_channels, kernel_size=1,", "bias=False)), ('norm2', normalization(channels)), ('drop2', None if not dropblock else DropBlock()), ('act2', activation(inplace=True)), ('conv2',", "dropblock else DropBlock()), ('semodule', None if not seoperation else SEModule( channels, reduction=sereduction, activation=nn.ReLU,", "('act1', activation(inplace=True)), ]) modules.extend([ ('conv2', nn.Conv2d( channels, channels, kernel_size=kernel, padding=kernel // 2, stride=stride,", "channels: modules.extend([ ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm1', normalization(channels)),", "channels * radix, kernel_size=3, padding=1, stride=1, groups=groups * radix, bias=False)), ('norm2', normalization(channels *", "('conv2', nn.Conv2d( channels, channels, kernel_size=kernel, padding=kernel // 2, stride=stride, groups=channels, bias=False)), ('norm2', normalization(channels)),", "('drop2', None if not dropblock else DropBlock()), ('act2', activation(inplace=True)), ('conv2', nn.Conv2d( channels, channels,", "not None)) class SelectedKernelOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck, normalization,", "m[1] is not None)) class SplitAttentionOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, radix, groups,", "1 else nn.AvgPool2d( kernel_size=3, stride=stride, padding=1)), ('conv3', nn.Conv2d( channels, out_channels, kernel_size=1, padding=0, stride=1,", "[ ('norm1', normalization(in_channels)), ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm2',", "padding=1, stride=1, groups=1, bias=False)), ] if m[1] is not None)) class SingleActBasicOperation(nn.Sequential): def", "dropblock else DropBlock()), ('act1', activation(inplace=True)), ('conv2', nn.Conv2d( channels, channels * radix, kernel_size=3, padding=1,", "m[1] is not None)) class SingleActBasicOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck,", "is not None)) else: channels = growth * expansion super().__init__(collections.OrderedDict(m for m in", "kernel_size=3, padding=1, stride=1, groups=1, bias=False)), ('norm3', normalization(out_channels)), ('drop3', None if not dropblock else", "not dropblock else DropBlock()), ] if m[1] is not None)) class TweakedBottleneckOperation(nn.Sequential): def", "bottleneck) super().__init__(collections.OrderedDict(m for m in [ ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0, stride=1,", "('conv1', nn.Conv2d( in_channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm1', normalization(channels)), ('drop1', None", "else DropBlock()), ('pool', None if stride == 1 else BlurPool2d(channels, stride=stride)), ('conv3', nn.Conv2d(", "stride=stride, groups=groups, bias=False)), ('norm1', normalization(channels)), ('drop1', None if not dropblock else DropBlock()), ('act1',", "[ ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm1', normalization(channels)), ('drop1',", "padding=0, stride=1, groups=1, bias=False)), ('norm1', normalization(channels)), ('drop1', None if not dropblock else 
DropBlock()),", "def __init__(self, in_channels, out_channels, stride, groups, bottleneck, normalization, activation, dropblock, **kwargs): channels =", "dropblock else DropBlock()), ] if m[1] is not None)) class PreActBasicOperation(nn.Sequential): def __init__(self,", "not None)) class MobileNetOperation(nn.Sequential): def __init__(self, in_channels, out_channels, kernel, stride, expansion, normalization, activation,", "out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('pool1', nn.AvgPool2d(kernel_size=2, stride=stride)), ] if m[1] is", "TweakedBottleneckOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck, normalization, activation, dropblock, **kwargs): channels", "dropblock else DropBlock()), ] if m[1] is not None)) class MobileNetOperation(nn.Sequential): def __init__(self,", "if not dropblock else DropBlock()), ] if m[1] is not None)) class BottleneckOperation(nn.Sequential):", "class DenseNetOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, growth, expansion, normalization, activation, dropblock, **kwargs):", "stride=1, groups=1, bias=False)), ('norm4', normalization(out_channels)), ('drop4', None if not dropblock else DropBlock()), ]", "('pool', None if stride == 1 else BlurPool2d(channels, stride=stride)), ('conv3', nn.Conv2d( channels, out_channels,", "SEModule, SKConv2d, BlurPool2d, SplitAttentionModule import torch.nn as nn import collections class BasicOperation(nn.Sequential): def", "None)) class SingleActBasicOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck, normalization, activation, dropblock,", "else DropBlock()), ('act1', activation(inplace=True)), ('conv2', nn.Conv2d( channels, channels, kernel_size=3, padding=1, stride=1, groups=groups, bias=False)),", "is not None)) class SingleActBottleneckOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck, normalization,", "kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm3', normalization(channels)), ('drop3', None if not dropblock else", "DropBlock()), ('act1', activation(inplace=True)), ('conv2', SKConv2d( channels, channels, kernel_size=3, padding=1, stride=1, radix=radix, groups=groups)), ('norm2',", "nn import collections class BasicOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck, normalization,", "else DropBlock()), ] if m[1] is not None)) class SelectedKernelOperation(nn.Sequential): def __init__(self, in_channels,", "nn.Conv2d( in_channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm2', normalization(channels)), ('drop2', None if", "channels = int(in_channels * expansion) modules = [] if in_channels != channels: modules.extend([", "in [ ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0, stride=stride, groups=1, bias=False)), ('norm1', normalization(channels)),", "out_channels, stride, growth, expansion, normalization, activation, dropblock, **kwargs): if stride != 1: super().__init__(collections.OrderedDict(m", "class TweakedSlectedKernelOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck, normalization, activation, dropblock,", "('act2', activation(inplace=True)), ('pool', None if stride == 1 else BlurPool2d(channels, stride=stride)), ('conv3', nn.Conv2d(", "kernel_size=3, padding=1, stride=1, groups=1, bias=False)), ] if m[1] is not 
None)) class SingleActBasicOperation(nn.Sequential):", "padding=1, stride=1, groups=1, bias=False)), ('norm3', normalization(out_channels)), ('drop3', None if not dropblock else DropBlock()),", "growth * expansion super().__init__(collections.OrderedDict(m for m in [ ('norm1', normalization(in_channels)), ('drop1', None if", "channels = round(out_channels / bottleneck * groups) super().__init__(collections.OrderedDict(m for m in [ ('conv1',", "out_channels, stride, radix, groups, bottleneck, normalization, activation, dropblock, **kwargs): channels = round(out_channels /", "collections class BasicOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck, normalization, activation, dropblock,", "DropBlock()), ('act1', activation(inplace=True)), ('conv2', nn.Conv2d( channels, out_channels, kernel_size=3, padding=1, stride=1, groups=1, bias=False)), ('norm2',", "not dropblock else DropBlock()), ] if m[1] is not None)) class TweakedSlectedKernelOperation(nn.Sequential): def", "stride != 1: super().__init__(collections.OrderedDict(m for m in [ ('norm1', normalization(in_channels)), ('act1', activation(inplace=True)), ('conv1',", "else DropBlock()), ] if m[1] is not None)) class TweakedBottleneckOperation(nn.Sequential): def __init__(self, in_channels,", "SingleActBottleneckOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck, normalization, activation, dropblock, **kwargs): channels", "stride=1, groups=1, bias=False)), ('norm2', normalization(out_channels)), ('drop2', None if not dropblock else DropBlock()), ]", "expansion super().__init__(collections.OrderedDict(m for m in [ ('norm1', normalization(in_channels)), ('drop1', None if not dropblock", "* expansion) modules = [] if in_channels != channels: modules.extend([ ('conv1', nn.Conv2d( in_channels,", "expansion, normalization, activation, dropblock, **kwargs): if stride != 1: super().__init__(collections.OrderedDict(m for m in", "not None)) class TweakedBottleneckOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck, normalization, activation,", "activation(inplace=True)), ('conv1', nn.Conv2d( in_channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('pool1', nn.AvgPool2d(kernel_size=2, stride=stride)),", "None if not dropblock else DropBlock()), ] if m[1] is not None)) class", "None)) class BottleneckOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck, normalization, activation, dropblock,", "('act2', activation(inplace=True)), ('conv2', nn.Conv2d( channels, out_channels, kernel_size=3, padding=1, stride=1, groups=1, bias=False)), ] if", "activation(inplace=True)), ('conv2', nn.Conv2d( channels, growth, kernel_size=3, padding=1, stride=1, bias=False)), ] if m[1] is", "channels, channels, kernel_size=3, padding=1, stride=1, radix=radix, groups=groups)), ('norm2', normalization(channels)), ('drop2', None if not", "* radix, bias=False)), ('norm2', normalization(channels * radix)), ('drop2', None if not dropblock else", "('conv2', nn.Conv2d( channels, growth, kernel_size=3, padding=1, stride=1, bias=False)), ] if m[1] is not", "('norm1', normalization(in_channels)), ('act1', activation(inplace=True)), ('conv1', nn.Conv2d( in_channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),", "stride=stride, groups=channels, bias=False)), ('norm2', normalization(channels)), ('drop2', None if not dropblock else 
DropBlock()), ('semodule',", "DropBlock()), ('act2', activation(inplace=True)), ('attention', SplitAttentionModule( channels, radix=radix, groups=groups, normalization=normalization, activation=activation)), ('downsample', None if", "kernel_size=3, padding=1, stride=1, radix=radix, groups=groups)), ('drop2', None if not dropblock else DropBlock()), ('pool',", "('norm2', normalization(channels)), ('drop2', None if not dropblock else DropBlock()), ('act2', activation(inplace=True)), ('conv3', nn.Conv2d(", "else DropBlock()), ] if m[1] is not None)) class PreActBasicOperation(nn.Sequential): def __init__(self, in_channels,", "DropBlock()), ]) super().__init__(collections.OrderedDict(m for m in modules if m[1] is not None)) class", "**kwargs): if stride != 1: super().__init__(collections.OrderedDict(m for m in [ ('norm1', normalization(in_channels)), ('act1',", "('norm2', normalization(channels)), ('drop2', None if not dropblock else DropBlock()), ('act2', activation(inplace=True)), ('pool', None", "else DropBlock()), ('act1', activation(inplace=True)), ('conv2', nn.Conv2d( channels, channels * radix, kernel_size=3, padding=1, stride=1,", "bottleneck) super().__init__(collections.OrderedDict(m for m in [ ('conv1', nn.Conv2d( in_channels, channels, kernel_size=3, padding=1, stride=stride,", "bias=False)), ('norm1', normalization(channels)), ('drop1', None if not dropblock else DropBlock()), ('act1', activation(inplace=True)), ])", "m in [ ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0, stride=stride, groups=1, bias=False)), ('norm1',", "1: super().__init__(collections.OrderedDict(m for m in [ ('norm1', normalization(in_channels)), ('act1', activation(inplace=True)), ('conv1', nn.Conv2d( in_channels,", "DropBlock()), ('act1', activation(inplace=True)), ('conv2', SKConv2d( channels, channels, kernel_size=3, padding=1, stride=1, radix=radix, groups=groups)), ('drop2',", "for m in modules if m[1] is not None)) class SplitAttentionOperation(nn.Sequential): def __init__(self,", "in_channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm1', normalization(channels)), ('drop1', None if not", "] if m[1] is not None)) class SingleActBottleneckOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride,", "SelectedKernelOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck, normalization, activation, dropblock, **kwargs):", "not None)) class SplitAttentionOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck, normalization,", "('attention', SplitAttentionModule( channels, radix=radix, groups=groups, normalization=normalization, activation=activation)), ('downsample', None if stride == 1", "('drop2', None if not dropblock else DropBlock()), ('act2', activation(inplace=True)), ('pool', None if stride", "radix=radix, groups=groups, normalization=normalization, activation=activation)), ('downsample', None if stride == 1 else nn.AvgPool2d( kernel_size=3,", "channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm3', normalization(channels)), ('drop3', None if not", "dropblock else DropBlock()), ('act2', activation(inplace=True)), ('conv2', nn.Conv2d( channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups,", "bias=False)), ('norm2', normalization(channels)), ('drop2', None if not dropblock else DropBlock()), ('act2', activation(inplace=True)), ('conv3',", "kernel_size=kernel, 
padding=kernel // 2, stride=stride, groups=channels, bias=False)), ('norm2', normalization(channels)), ('drop2', None if not", "**kwargs): channels = round(out_channels / bottleneck * groups) super().__init__(collections.OrderedDict(m for m in [", "kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm2', normalization(channels)), ('drop2', None if not dropblock else", "dropblock else DropBlock()), ('act1', activation(inplace=True)), ]) modules.extend([ ('conv2', nn.Conv2d( channels, channels, kernel_size=kernel, padding=kernel", "] if m[1] is not None)) class BottleneckOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride,", "!= 1: super().__init__(collections.OrderedDict(m for m in [ ('norm1', normalization(in_channels)), ('act1', activation(inplace=True)), ('conv1', nn.Conv2d(", "activation=nn.ReLU, sigmoid=sesigmoid)), ('act2', activation(inplace=True)), ('conv3', nn.Conv2d( channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),", "if m[1] is not None)) class SingleActBasicOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups,", "else DropBlock()), ('act2', activation(inplace=True)), ('conv2', nn.Conv2d( channels, growth, kernel_size=3, padding=1, stride=1, bias=False)), ]", "channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm3', normalization(channels)), ('drop3', None if not dropblock", "('pool1', nn.AvgPool2d(kernel_size=2, stride=stride)), ] if m[1] is not None)) else: channels = growth", "* radix)), ('drop2', None if not dropblock else DropBlock()), ('act2', activation(inplace=True)), ('attention', SplitAttentionModule(", "else DropBlock()), ('act1', activation(inplace=True)), ('conv2', nn.Conv2d( channels, out_channels, kernel_size=3, padding=1, stride=1, groups=1, bias=False)),", "else DropBlock()), ('act1', activation(inplace=True)), ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),", "('norm1', normalization(in_channels)), ('conv1', nn.Conv2d( in_channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm2', normalization(channels)),", "('norm3', normalization(channels)), ('drop3', None if not dropblock else DropBlock()), ] if m[1] is", "None if not dropblock else DropBlock()), ('act1', activation(inplace=True)), ('conv2', SKConv2d( channels, channels, kernel_size=3,", "SKConv2d( channels, channels, kernel_size=3, padding=1, stride=1, radix=radix, groups=groups)), ('norm2', normalization(channels)), ('drop2', None if", "bias=False)), ('norm3', normalization(out_channels)), ('drop3', None if not dropblock else DropBlock()), ] if m[1]", "dropblock else DropBlock()), ('act2', activation(inplace=True)), ('attention', SplitAttentionModule( channels, radix=radix, groups=groups, normalization=normalization, activation=activation)), ('downsample',", "for m in [ ('norm1', normalization(in_channels)), ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0, stride=1,", "else DropBlock()), ] if m[1] is not None)) class MobileNetOperation(nn.Sequential): def __init__(self, in_channels,", "stride=stride, padding=1)), ('conv3', nn.Conv2d( channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm3', normalization(out_channels)),", "== 1 else nn.AvgPool2d( kernel_size=3, stride=stride, padding=1)), ('conv3', nn.Conv2d( channels, out_channels, kernel_size=1, padding=0,", "stride=1, groups=1, bias=False)), ] if m[1] is not 
None)) class SingleActBasicOperation(nn.Sequential): def __init__(self,", "round(out_channels / bottleneck * groups) super().__init__(collections.OrderedDict(m for m in [ ('norm1', normalization(in_channels)), ('conv1',", "in_channels, out_channels, stride, groups, bottleneck, normalization, activation, dropblock, **kwargs): channels = round(out_channels /", "nn.Conv2d( in_channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('pool1', nn.AvgPool2d(kernel_size=2, stride=stride)), ] if", "channels, channels, kernel_size=kernel, padding=kernel // 2, stride=stride, groups=channels, bias=False)), ('norm2', normalization(channels)), ('drop2', None", "class SingleActBottleneckOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck, normalization, activation, dropblock, **kwargs):", "padding=0, stride=1, groups=1, bias=False)), ('norm2', normalization(channels)), ('drop2', None if not dropblock else DropBlock()),", "in modules if m[1] is not None)) class SplitAttentionOperation(nn.Sequential): def __init__(self, in_channels, out_channels,", "round(out_channels / bottleneck) super().__init__(collections.OrderedDict(m for m in [ ('conv1', nn.Conv2d( in_channels, channels, kernel_size=3,", "m[1] is not None)) class PreActBasicOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck,", "channels = round(out_channels / bottleneck * groups) super().__init__(collections.OrderedDict(m for m in [ ('norm1',", "activation(inplace=True)), ('conv2', SKConv2d( channels, channels, kernel_size=3, padding=1, stride=1, radix=radix, groups=groups)), ('drop2', None if", "not dropblock else DropBlock()), ] if m[1] is not None)) class SelectedKernelOperation(nn.Sequential): def", "nn.Conv2d( channels, out_channels, kernel_size=3, padding=1, stride=1, groups=1, bias=False)), ('norm3', normalization(out_channels)), ('drop3', None if", "('norm2', normalization(channels * radix)), ('drop2', None if not dropblock else DropBlock()), ('act2', activation(inplace=True)),", "not dropblock else DropBlock()), ('act2', activation(inplace=True)), ('pool', None if stride == 1 else", "('drop1', None if not dropblock else DropBlock()), ('act1', activation(inplace=True)), ('conv2', nn.Conv2d( channels, channels", "groups=groups, bias=False)), ('norm2', normalization(channels)), ('drop2', None if not dropblock else DropBlock()), ('act2', activation(inplace=True)),", "padding=0, stride=1, groups=1, bias=False)), ('norm3', normalization(channels)), ('drop3', None if not dropblock else DropBlock()),", "if not dropblock else DropBlock()), ('act2', activation(inplace=True)), ('pool', None if stride == 1", "if not dropblock else DropBlock()), ('pool', None if stride == 1 else BlurPool2d(channels,", "in [ ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm1', normalization(channels)),", "in_channels, channels, kernel_size=1, padding=0, stride=stride, groups=1, bias=False)), ('norm1', normalization(channels)), ('drop1', None if not", "padding=0, stride=1, groups=1, bias=False)), ('norm4', normalization(out_channels)), ('drop4', None if not dropblock else DropBlock()),", "channels, kernel_size=kernel, padding=kernel // 2, stride=stride, groups=channels, bias=False)), ('norm2', normalization(channels)), ('drop2', None if", "('conv1', nn.Conv2d( in_channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm2', normalization(channels)), 
('drop2', None", "activation(inplace=True)), ('conv1', nn.Conv2d( in_channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm2', normalization(channels)), ('drop2',", "m[1] is not None)) class SingleActBottleneckOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck,", "normalization(in_channels)), ('conv1', nn.Conv2d( in_channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm2', normalization(channels)), ('drop2',", "channels, out_channels, kernel_size=3, padding=1, stride=1, groups=1, bias=False)), ('norm3', normalization(out_channels)), ('drop3', None if not", "] if m[1] is not None)) class TweakedBottleneckOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride,", "not dropblock else DropBlock()), ] if m[1] is not None)) class DenseNetOperation(nn.Sequential): def", "in [ ('norm1', normalization(in_channels)), ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),", "('drop2', None if not dropblock else DropBlock()), ('semodule', None if not seoperation else", "kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm2', normalization(channels)), ('drop2', None if not dropblock else", "modules if m[1] is not None)) class SplitAttentionOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride,", "channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm3', normalization(out_channels)), ('drop3', None if not", "DropBlock()), ] if m[1] is not None)) class TweakedBottleneckOperation(nn.Sequential): def __init__(self, in_channels, out_channels,", "radix=radix, groups=groups)), ('drop2', None if not dropblock else DropBlock()), ('pool', None if stride", "SplitAttentionModule( channels, radix=radix, groups=groups, normalization=normalization, activation=activation)), ('downsample', None if stride == 1 else", "dropblock, **kwargs): channels = round(out_channels / bottleneck) super().__init__(collections.OrderedDict(m for m in [ ('conv1',", "SKConv2d( channels, channels, kernel_size=3, padding=1, stride=1, radix=radix, groups=groups)), ('drop2', None if not dropblock", "] if m[1] is not None)) class MobileNetOperation(nn.Sequential): def __init__(self, in_channels, out_channels, kernel,", "radix, groups, bottleneck, normalization, activation, dropblock, **kwargs): channels = round(out_channels / bottleneck) super().__init__(collections.OrderedDict(m", "groups=groups, bias=False)), ('norm3', normalization(channels)), ('drop3', None if not dropblock else DropBlock()), ('act3', activation(inplace=True)),", "dropblock else DropBlock()), ] if m[1] is not None)) class DenseNetOperation(nn.Sequential): def __init__(self,", "if not dropblock else DropBlock()), ('act1', activation(inplace=True)), ('conv1', nn.Conv2d( in_channels, channels, kernel_size=3, padding=1,", "if not dropblock else DropBlock()), ] if m[1] is not None)) class DenseNetOperation(nn.Sequential):", "bottleneck) super().__init__(collections.OrderedDict(m for m in [ ('norm1', normalization(in_channels)), ('drop1', None if not dropblock", "SEModule( channels, reduction=sereduction, activation=nn.ReLU, sigmoid=sesigmoid)), ('act2', activation(inplace=True)), ('conv3', nn.Conv2d( channels, out_channels, kernel_size=1, padding=0,", "nn.Conv2d( channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm4', normalization(out_channels)), ('drop4', 
None if", "sigmoid=sesigmoid)), ('act2', activation(inplace=True)), ('conv3', nn.Conv2d( channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm3',", "[ ('norm1', normalization(in_channels)), ('act1', activation(inplace=True)), ('conv1', nn.Conv2d( in_channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1,", "DropBlock()), ] if m[1] is not None)) class MobileNetOperation(nn.Sequential): def __init__(self, in_channels, out_channels,", "channels, channels * radix, kernel_size=3, padding=1, stride=1, groups=groups * radix, bias=False)), ('norm2', normalization(channels", "m in [ ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm1',", "m in [ ('norm1', normalization(in_channels)), ('act1', activation(inplace=True)), ('conv1', nn.Conv2d( in_channels, out_channels, kernel_size=1, padding=0,", "super().__init__(collections.OrderedDict(m for m in [ ('norm1', normalization(in_channels)), ('drop1', None if not dropblock else", "round(out_channels / bottleneck) super().__init__(collections.OrderedDict(m for m in [ ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1,", "normalization(channels)), ('drop2', None if not dropblock else DropBlock()), ('act2', activation(inplace=True)), ('conv2', nn.Conv2d( channels,", "if not dropblock else DropBlock()), ('act1', activation(inplace=True)), ('conv2', nn.Conv2d( channels, channels * radix,", "None if stride == 1 else nn.AvgPool2d( kernel_size=3, stride=stride, padding=1)), ('conv3', nn.Conv2d( channels,", "None if not dropblock else DropBlock()), ('act1', activation(inplace=True)), ]) modules.extend([ ('conv2', nn.Conv2d( channels,", "for m in [ ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0, stride=stride, groups=1, bias=False)),", "class SelectedKernelOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck, normalization, activation, dropblock,", "nn.Conv2d( channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm3', normalization(channels)), ('drop3', None if", "kernel_size=3, padding=1, stride=1, groups=groups, bias=False)), ('norm2', normalization(channels)), ('drop2', None if not dropblock else", "groups=1, bias=False)), ('pool1', nn.AvgPool2d(kernel_size=2, stride=stride)), ] if m[1] is not None)) else: channels", "nn.Conv2d( channels, channels * radix, kernel_size=3, padding=1, stride=1, groups=groups * radix, bias=False)), ('norm2',", "('drop2', None if not dropblock else DropBlock()), ('pool', None if stride == 1", "if stride == 1 else BlurPool2d(channels, stride=stride)), ('conv3', nn.Conv2d( channels, out_channels, kernel_size=1, padding=0,", "if not dropblock else DropBlock()), ] if m[1] is not None)) class PreActBasicOperation(nn.Sequential):", "stride, groups, bottleneck, normalization, activation, dropblock, **kwargs): channels = round(out_channels / bottleneck) super().__init__(collections.OrderedDict(m", "else nn.AvgPool2d( kernel_size=3, stride=stride, padding=1)), ('conv3', nn.Conv2d( channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1,", "padding=0, stride=stride, groups=1, bias=False)), ('norm1', normalization(channels)), ('drop1', None if not dropblock else DropBlock()),", "bias=False)), ('norm2', normalization(out_channels)), ('drop2', None if not dropblock else DropBlock()), ] if m[1]", "if not dropblock else DropBlock()), ('act1', activation(inplace=True)), ('conv2', SKConv2d( channels, channels, 
kernel_size=3, padding=1,", "None if not dropblock else DropBlock()), ('act1', activation(inplace=True)), ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1,", "DropBlock()), ] if m[1] is not None)) class SelectedKernelOperation(nn.Sequential): def __init__(self, in_channels, out_channels,", "DropBlock()), ('pool', None if stride == 1 else BlurPool2d(channels, stride=stride)), ('conv3', nn.Conv2d( channels,", "out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm3', normalization(out_channels)), ('drop3', None if not dropblock", "not dropblock else DropBlock()), ('pool', None if stride == 1 else BlurPool2d(channels, stride=stride)),", "('norm2', normalization(channels)), ('drop2', None if not dropblock else DropBlock()), ('act2', activation(inplace=True)), ('conv2', nn.Conv2d(", "in_channels, out_channels, stride, radix, groups, bottleneck, normalization, activation, dropblock, **kwargs): channels = round(out_channels", "SingleActBasicOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck, normalization, activation, dropblock, **kwargs): channels", "DropBlock()), ('act2', activation(inplace=True)), ('conv2', nn.Conv2d( channels, growth, kernel_size=3, padding=1, stride=1, bias=False)), ] if", "not dropblock else DropBlock()), ] if m[1] is not None)) class SingleActBottleneckOperation(nn.Sequential): def", "stride=1, groups=1, bias=False)), ('pool1', nn.AvgPool2d(kernel_size=2, stride=stride)), ] if m[1] is not None)) else:", "out_channels, stride, groups, bottleneck, normalization, activation, dropblock, **kwargs): channels = round(out_channels / bottleneck", "[ ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0, stride=stride, groups=1, bias=False)), ('norm1', normalization(channels)), ('drop1',", "groups) super().__init__(collections.OrderedDict(m for m in [ ('norm1', normalization(in_channels)), ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1,", "in_channels, out_channels, kernel, stride, expansion, normalization, activation, dropblock, seoperation, sereduction, sesigmoid, **kwargs): channels", "not seoperation else SEModule( channels, reduction=sereduction, activation=nn.ReLU, sigmoid=sesigmoid)), ('act2', activation(inplace=True)), ('conv3', nn.Conv2d( channels,", "('act1', activation(inplace=True)), ('conv2', nn.Conv2d( channels, channels * radix, kernel_size=3, padding=1, stride=1, groups=groups *", "normalization(channels)), ('drop2', None if not dropblock else DropBlock()), ('semodule', None if not seoperation", "if not dropblock else DropBlock()), ('act1', activation(inplace=True)), ]) modules.extend([ ('conv2', nn.Conv2d( channels, channels,", "groups=groups * radix, bias=False)), ('norm2', normalization(channels * radix)), ('drop2', None if not dropblock", "[ ('conv1', nn.Conv2d( in_channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm1', normalization(channels)), ('drop1',", "activation(inplace=True)), ('conv2', nn.Conv2d( channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm3', normalization(channels)), ('drop3',", "if not dropblock else DropBlock()), ] if m[1] is not None)) class MobileNetOperation(nn.Sequential):", "padding=1, stride=1, groups=groups * radix, bias=False)), ('norm2', normalization(channels * radix)), ('drop2', None if", "radix, bias=False)), ('norm2', normalization(channels * radix)), ('drop2', None if not dropblock else DropBlock()),", "groups=groups)), ('drop2', None 
if not dropblock else DropBlock()), ('pool', None if stride ==", "channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm3', normalization(channels)), ('drop3', None if not", "in_channels != channels: modules.extend([ ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),", "kernel_size=3, padding=1, stride=1, groups=groups * radix, bias=False)), ('norm2', normalization(channels * radix)), ('drop2', None", "activation, dropblock, **kwargs): channels = round(out_channels / bottleneck) super().__init__(collections.OrderedDict(m for m in [", "kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm1', normalization(channels)), ('drop1', None if not dropblock else", "('conv2', SKConv2d( channels, channels, kernel_size=3, padding=1, stride=1, radix=radix, groups=groups)), ('drop2', None if not", "stride=stride, groups=groups, bias=False)), ('norm3', normalization(channels)), ('drop3', None if not dropblock else DropBlock()), ('act3',", "from .modules import DropBlock, SEModule, SKConv2d, BlurPool2d, SplitAttentionModule import torch.nn as nn import", "nn.Conv2d( channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm3', normalization(out_channels)), ('drop3', None if", "radix, groups, bottleneck, normalization, activation, dropblock, **kwargs): channels = round(out_channels / bottleneck *", "None if not dropblock else DropBlock()), ('act1', activation(inplace=True)), ('conv2', nn.Conv2d( channels, channels *", "SplitAttentionOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck, normalization, activation, dropblock, **kwargs):", "in_channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm2', normalization(channels)), ('drop2', None if not", "if m[1] is not None)) class TweakedSlectedKernelOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, radix,", "= [] if in_channels != channels: modules.extend([ ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0,", "growth, expansion, normalization, activation, dropblock, **kwargs): if stride != 1: super().__init__(collections.OrderedDict(m for m", "else DropBlock()), ('act1', activation(inplace=True)), ('conv1', nn.Conv2d( in_channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)),", "m[1] is not None)) class TweakedSlectedKernelOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, radix, groups,", "stride=1, groups=1, bias=False)), ('norm3', normalization(channels)), ('drop3', None if not dropblock else DropBlock()), ]", "None)) class SplitAttentionOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck, normalization, activation,", "**kwargs): channels = int(in_channels * expansion) modules = [] if in_channels != channels:", "* radix, kernel_size=3, padding=1, stride=1, groups=groups * radix, bias=False)), ('norm2', normalization(channels * radix)),", "channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm4', normalization(out_channels)), ('drop4', None if not", "m in [ ('norm1', normalization(in_channels)), ('drop1', None if not dropblock else DropBlock()), ('act1',", "if m[1] is not None)) class MobileNetOperation(nn.Sequential): def __init__(self, in_channels, out_channels, kernel, stride,", "not dropblock else DropBlock()), ('act1', 
activation(inplace=True)), ('conv1', nn.Conv2d( in_channels, channels, kernel_size=3, padding=1, stride=stride,", "else DropBlock()), ] if m[1] is not None)) class BottleneckOperation(nn.Sequential): def __init__(self, in_channels,", "stride=1, groups=1, bias=False)), ('norm3', normalization(out_channels)), ('drop3', None if not dropblock else DropBlock()), ])", "not dropblock else DropBlock()), ('act2', activation(inplace=True)), ('conv3', nn.Conv2d( channels, out_channels, kernel_size=1, padding=0, stride=1,", "('drop4', None if not dropblock else DropBlock()), ] if m[1] is not None))", "not None)) class PreActBasicOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck, normalization, activation,", "not dropblock else DropBlock()), ('act1', activation(inplace=True)), ('conv1', nn.Conv2d( in_channels, channels, kernel_size=1, padding=0, stride=1,", "in [ ('norm1', normalization(in_channels)), ('drop1', None if not dropblock else DropBlock()), ('act1', activation(inplace=True)),", "class BasicOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck, normalization, activation, dropblock, **kwargs):", "MobileNetOperation(nn.Sequential): def __init__(self, in_channels, out_channels, kernel, stride, expansion, normalization, activation, dropblock, seoperation, sereduction,", "import DropBlock, SEModule, SKConv2d, BlurPool2d, SplitAttentionModule import torch.nn as nn import collections class", "kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm1', normalization(channels)), ('drop1', None if not dropblock else", "**kwargs): channels = round(out_channels / bottleneck) super().__init__(collections.OrderedDict(m for m in [ ('norm1', normalization(in_channels)),", "normalization, activation, dropblock, **kwargs): if stride != 1: super().__init__(collections.OrderedDict(m for m in [", "('drop1', None if not dropblock else DropBlock()), ('act1', activation(inplace=True)), ('conv2', nn.Conv2d( channels, channels,", "DropBlock()), ('act1', activation(inplace=True)), ('conv1', nn.Conv2d( in_channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm2',", "def __init__(self, in_channels, out_channels, stride, growth, expansion, normalization, activation, dropblock, **kwargs): if stride", "class SingleActBasicOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride, groups, bottleneck, normalization, activation, dropblock, **kwargs):", "] if m[1] is not None)) class SingleActBasicOperation(nn.Sequential): def __init__(self, in_channels, out_channels, stride,", "else DropBlock()), ]) super().__init__(collections.OrderedDict(m for m in modules if m[1] is not None))", "nn.Conv2d( channels, out_channels, kernel_size=3, padding=1, stride=1, groups=1, bias=False)), ('norm2', normalization(out_channels)), ('drop2', None if", "('act2', activation(inplace=True)), ('conv2', nn.Conv2d( channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)), ('norm3', normalization(channels)),", "stride=stride, groups=groups, bias=False)), ('norm2', normalization(channels)), ('drop2', None if not dropblock else DropBlock()), ('act2',", "groups, bottleneck, normalization, activation, dropblock, **kwargs): channels = round(out_channels / bottleneck * groups)", "BlurPool2d(channels, stride=stride)), ('conv3', nn.Conv2d( channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)), ('norm3', 
from .modules import DropBlock, SEModule, SKConv2d, BlurPool2d, SplitAttentionModule

import torch.nn as nn
import collections


class BasicOperation(nn.Sequential):
    def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        channels = round(out_channels / bottleneck)
        super().__init__(collections.OrderedDict(m for m in [
            ('conv1', nn.Conv2d(in_channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)),
            ('norm1', normalization(channels)),
            ('drop1', None if not dropblock else DropBlock()),
            ('act1', activation(inplace=True)),
            ('conv2', nn.Conv2d(channels, out_channels, kernel_size=3, padding=1, stride=1, groups=1, bias=False)),
            ('norm2', normalization(out_channels)),
            ('drop2', None if not dropblock else DropBlock()),
        ] if m[1] is not None))


class BottleneckOperation(nn.Sequential):
    def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        channels = round(out_channels / bottleneck * groups)
        super().__init__(collections.OrderedDict(m for m in [
            ('conv1', nn.Conv2d(in_channels, channels, kernel_size=1, padding=0, stride=stride, groups=1, bias=False)),
            ('norm1', normalization(channels)),
            ('drop1', None if not dropblock else DropBlock()),
            ('act1', activation(inplace=True)),
            ('conv2', nn.Conv2d(channels, channels, kernel_size=3, padding=1, stride=1, groups=groups, bias=False)),
            ('norm2', normalization(channels)),
            ('drop2', None if not dropblock else DropBlock()),
            ('act2', activation(inplace=True)),
            ('conv3', nn.Conv2d(channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),
            ('norm3', normalization(out_channels)),
            ('drop3', None if not dropblock else DropBlock()),
        ] if m[1] is not None))


class SelectedKernelOperation(nn.Sequential):
    def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        channels = round(out_channels / bottleneck * groups)
        super().__init__(collections.OrderedDict(m for m in [
            ('conv1', nn.Conv2d(in_channels, channels, kernel_size=1, padding=0, stride=stride, groups=1, bias=False)),
            ('norm1', normalization(channels)),
            ('drop1', None if not dropblock else DropBlock()),
            ('act1', activation(inplace=True)),
            ('conv2', SKConv2d(channels, channels, kernel_size=3, padding=1, stride=1, radix=radix, groups=groups)),
            ('norm2', normalization(channels)),
            ('drop2', None if not dropblock else DropBlock()),
            ('act2', activation(inplace=True)),
            ('conv3', nn.Conv2d(channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),
            ('norm3', normalization(out_channels)),
            ('drop3', None if not dropblock else DropBlock()),
        ] if m[1] is not None))


class TweakedBottleneckOperation(nn.Sequential):
    def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        channels = round(out_channels / bottleneck)
        super().__init__(collections.OrderedDict(m for m in [
            ('conv1', nn.Conv2d(in_channels, channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),
            ('norm1', normalization(channels)),
            ('drop1', None if not dropblock else DropBlock()),
            ('act1', activation(inplace=True)),
            ('conv2', nn.Conv2d(channels, channels, kernel_size=3, padding=1, stride=1, groups=groups, bias=False)),
            ('norm2', normalization(channels)),
            ('drop2', None if not dropblock else DropBlock()),
            ('act2', activation(inplace=True)),
            ('pool', None if stride == 1 else BlurPool2d(channels, stride=stride)),
            ('conv3', nn.Conv2d(channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),
            ('norm3', normalization(out_channels)),
            ('drop3', None if not dropblock else DropBlock()),
        ] if m[1] is not None))


class TweakedSlectedKernelOperation(nn.Sequential):
    def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        channels = round(out_channels / bottleneck)
        super().__init__(collections.OrderedDict(m for m in [
            ('conv1', nn.Conv2d(in_channels, channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),
            ('norm1', normalization(channels)),
            ('drop1', None if not dropblock else DropBlock()),
            ('act1', activation(inplace=True)),
            ('conv2', SKConv2d(channels, channels, kernel_size=3, padding=1, stride=1, radix=radix, groups=groups)),
            ('drop2', None if not dropblock else DropBlock()),
            ('pool', None if stride == 1 else BlurPool2d(channels, stride=stride)),
            ('conv3', nn.Conv2d(channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),
            ('norm3', normalization(out_channels)),
            ('drop3', None if not dropblock else DropBlock()),
        ] if m[1] is not None))


class MobileNetOperation(nn.Sequential):
    def __init__(self, in_channels, out_channels, kernel, stride, expansion,
                 normalization, activation, dropblock, seoperation, sereduction, sesigmoid, **kwargs):
        channels = int(in_channels * expansion)
        modules = []
        if in_channels != channels:
            modules.extend([
                ('conv1', nn.Conv2d(in_channels, channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),
                ('norm1', normalization(channels)),
                ('drop1', None if not dropblock else DropBlock()),
                ('act1', activation(inplace=True)),
            ])
        modules.extend([
            ('conv2', nn.Conv2d(channels, channels, kernel_size=kernel, padding=kernel // 2, stride=stride, groups=channels, bias=False)),
            ('norm2', normalization(channels)),
            ('drop2', None if not dropblock else DropBlock()),
            ('semodule', None if not seoperation else SEModule(channels, reduction=sereduction, activation=nn.ReLU, sigmoid=sesigmoid)),
            ('act2', activation(inplace=True)),
            ('conv3', nn.Conv2d(channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),
            ('norm3', normalization(out_channels)),
            ('drop3', None if not dropblock else DropBlock()),
        ])
        super().__init__(collections.OrderedDict(m for m in modules if m[1] is not None))


class SplitAttentionOperation(nn.Sequential):
    def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        channels = round(out_channels / bottleneck * groups)
        super().__init__(collections.OrderedDict(m for m in [
            ('conv1', nn.Conv2d(in_channels, channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),
            ('norm1', normalization(channels)),
            ('drop1', None if not dropblock else DropBlock()),
            ('act1', activation(inplace=True)),
            ('conv2', nn.Conv2d(channels, channels * radix, kernel_size=3, padding=1, stride=1, groups=groups * radix, bias=False)),
            ('norm2', normalization(channels * radix)),
            ('drop2', None if not dropblock else DropBlock()),
            ('act2', activation(inplace=True)),
            ('attention', SplitAttentionModule(channels, radix=radix, groups=groups, normalization=normalization, activation=activation)),
            ('downsample', None if stride == 1 else nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)),
            ('conv3', nn.Conv2d(channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),
            ('norm3', normalization(out_channels)),
            ('drop3', None if not dropblock else DropBlock()),
        ] if m[1] is not None))


class PreActBasicOperation(nn.Sequential):
    def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        channels = round(out_channels / bottleneck)
        super().__init__(collections.OrderedDict(m for m in [
            ('norm1', normalization(in_channels)),
            ('drop1', None if not dropblock else DropBlock()),
            ('act1', activation(inplace=True)),
            ('conv1', nn.Conv2d(in_channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)),
            ('norm2', normalization(channels)),
            ('drop2', None if not dropblock else DropBlock()),
            ('act2', activation(inplace=True)),
            ('conv2', nn.Conv2d(channels, out_channels, kernel_size=3, padding=1, stride=1, groups=1, bias=False)),
        ] if m[1] is not None))


class SingleActBasicOperation(nn.Sequential):
    def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        channels = round(out_channels / bottleneck)
        super().__init__(collections.OrderedDict(m for m in [
            ('norm1', normalization(in_channels)),
            ('conv1', nn.Conv2d(in_channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)),
            ('norm2', normalization(channels)),
            ('drop2', None if not dropblock else DropBlock()),
            ('act2', activation(inplace=True)),
            ('conv2', nn.Conv2d(channels, out_channels, kernel_size=3, padding=1, stride=1, groups=1, bias=False)),
            ('norm3', normalization(out_channels)),
            ('drop3', None if not dropblock else DropBlock()),
        ] if m[1] is not None))


class SingleActBottleneckOperation(nn.Sequential):
    def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
                 normalization, activation, dropblock, **kwargs):
        channels = round(out_channels / bottleneck * groups)
        super().__init__(collections.OrderedDict(m for m in [
            ('norm1', normalization(in_channels)),
            ('conv1', nn.Conv2d(in_channels, channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),
            ('norm2', normalization(channels)),
            ('drop2', None if not dropblock else DropBlock()),
            ('act2', activation(inplace=True)),
            ('conv2', nn.Conv2d(channels, channels, kernel_size=3, padding=1, stride=stride, groups=groups, bias=False)),
            ('norm3', normalization(channels)),
            ('drop3', None if not dropblock else DropBlock()),
            ('act3', activation(inplace=True)),
            ('conv3', nn.Conv2d(channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),
            ('norm4', normalization(out_channels)),
            ('drop4', None if not dropblock else DropBlock()),
        ] if m[1] is not None))


class DenseNetOperation(nn.Sequential):
    def __init__(self, in_channels, out_channels, stride, growth, expansion,
                 normalization, activation, dropblock, **kwargs):
        if stride != 1:
            super().__init__(collections.OrderedDict(m for m in [
                ('norm1', normalization(in_channels)),
                ('act1', activation(inplace=True)),
                ('conv1', nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),
                ('pool1', nn.AvgPool2d(kernel_size=2, stride=stride)),
            ] if m[1] is not None))
        else:
            channels = growth * expansion
            super().__init__(collections.OrderedDict(m for m in [
                ('norm1', normalization(in_channels)),
                ('drop1', None if not dropblock else DropBlock()),
                ('act1', activation(inplace=True)),
                ('conv1', nn.Conv2d(in_channels, channels, kernel_size=1, padding=0, stride=1, groups=1, bias=False)),
                ('norm2', normalization(channels)),
                ('drop2', None if not dropblock else DropBlock()),
                ('act2', activation(inplace=True)),
                ('conv2', nn.Conv2d(channels, growth, kernel_size=3, padding=1, stride=1, bias=False)),
            ] if m[1] is not None))
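The operation classes above all follow one constructor pattern: compute the internal channel width, build an OrderedDict of named (layer, module) pairs, filter out every entry whose module is None (disabled DropBlock, identity pooling, and so on), and hand the result to nn.Sequential. A minimal usage sketch is below; the concrete choices of nn.BatchNorm2d, nn.ReLU, bottleneck=4, and the tensor sizes are illustrative assumptions, not values taken from the original code.

import torch
import torch.nn as nn

# Hypothetical wiring of one operation from the module above.
# normalization and activation are passed as callables, exactly as the constructors expect.
op = BottleneckOperation(
    in_channels=64, out_channels=256, stride=1, groups=1, bottleneck=4,
    normalization=nn.BatchNorm2d, activation=nn.ReLU, dropblock=False)

x = torch.randn(2, 64, 32, 32)   # dummy batch: N=2, C=64, 32x32 feature map
y = op(x)                        # plain nn.Sequential forward through the conv/norm/act stack
print(y.shape)                   # expected: torch.Size([2, 256, 32, 32])

Note that these classes implement only the transformation branch; any residual or dense connection around them would be added by an enclosing block that is not part of this file.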
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 09:33:16 2020

@author: jvergere

Iridium-like constellation:
66 Sats, 781 km altitude (7159 km semimajor axis), 86.4 deg inclination,
6 orbit planes spaced 30 degrees apart
"""

import datetime as dt
import numpy as np
import os

from comtypes.client import CreateObject  # Will allow you to launch STK
# from comtypes.client import GetActiveObject  # Will allow you to connect to a running instance of STK

# Start the application; it will return a pointer to the Application Interface
app = CreateObject("STK11.Application")

# app is a COM object; inspect it with the python builtin type method
#   type(app)
# More info is available via the python builtin dir method, which will list
# all the available properties and methods
#   dir(app)
# Additional useful information is available via the python builtin help
#   help(app)
app.Visible = True
app.UserControl = True

root = app.Personality2  # root -> IAgStkObjectRoot

# These are not available to import until this point if this is the first run;
# after running once they can be included at the top with all the other import statements
from comtypes.gen import STKUtil
from comtypes.gen import STKObjects

# Grab the scenario interfaces (the scenario creation call was not recovered; an open scenario is assumed)
scenario = root.CurrentScenario                                # scenario -> IAgStkObject
scenario2 = scenario.QueryInterface(STKObjects.IAgScenario)    # scenario2 -> IAgScenario
scenario2.StartTime = "1 Jun 2016 16:00:00.000"
scenario2.StopTime = "2 Jun 2016 16:00:00.000"
root.Rewind()

# Insert Facilites from text file using connect. Each line of the text file is
# formatted: FacName,Longitude,Latitude
with open("Facilities.txt", "r") as faclist:
    for line in faclist:
        facData = line.strip().split(",")
        root.ExecuteCommand("New / */Facility {}".format(facData[0]))
        setPositionCmd = "SetPosition */Facility/{} Geodetic {} {} 0.0".format(facData[0], facData[2], facData[1])
        root.ExecuteCommand(setPositionCmd)
        setColorCommand = "Graphics */Facility/{} SetColor blue".format(facData[0])
        root.ExecuteCommand(setColorCommand)

# Create sensor constellation, used later to hold all the sensor objects
sensorConst = scenario.Children.New(STKObjects.eConstellation, "SensorConst")
sensorConst2 = sensorConst.QueryInterface(STKObjects.IAgConstellation)

# Build satellite constellation, attach sensors, assign sensor to constellation object
i = 1
for RAAN in range(0, 180, 45):                # 4 orbit planes
    j = 1
    for trueAnomaly in range(0, 360, 45):     # 8 sats per plane
        # insert satellite
        newSat = scenario.Children.New(STKObjects.eSatellite, "Sat{}{}".format(i, j))
        newSat2 = newSat.QueryInterface(STKObjects.IAgSatellite)

        # change some basic display attributes
        newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Color = 65535
        newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Line.Width = STKObjects.e1
        newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Inherit = False
        newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesOrbit).IsGroundTrackVisible = False

        # Buildup Initial State using TwoBody Propagator and Classical Orbital Elements
        keplarian = newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.ConvertTo(
            STKUtil.eOrbitStateClassical).QueryInterface(STKObjects.IAgOrbitStateClassical)
        keplarian.SizeShapeType = STKObjects.eSizeShapeSemimajorAxis
        keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).SemiMajorAxis = 7159
        keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).Eccentricity = 0
        keplarian.Orientation.Inclination = 86.4
        keplarian.Orientation.ArgOfPerigee = 0
        keplarian.Orientation.AscNodeType = STKObjects.eAscNodeRAAN
        keplarian.Orientation.AscNode.QueryInterface(STKObjects.IAgOrientationAscNodeRAAN).Value = RAAN
        keplarian.LocationType = STKObjects.eLocationTrueAnomaly
        # Stagger TrueAnomalies for every other orbital plane
        keplarian.Location.QueryInterface(STKObjects.IAgClassicalLocationTrueAnomaly).Value = trueAnomaly + (45 / 2) * (i % 2)

        # Attach a sensor to the satellite (creation call reconstructed to match the constellation path below)
        sensor = newSat.Children.New(STKObjects.eSensor, "Sensor{}{}".format(i, j))
        sensor2 = sensor.QueryInterface(STKObjects.IAgSensor)

        # Add the sensor to the SensorConstellation
        sensorConst2.Objects.Add("Satellite/Sat{0}{1}/Sensor/Sensor{0}{1}".format(i, j))

        # Adjust the translucenty of the sensor projections
        sensor2.VO.PercentTranslucency = 75
        sensor2.Graphics.LineStyle = STKUtil.eDotted
        j += 1
    i += 1

# Create a Chain object for each Facility to the constellation.
facCount = scenario.Children.GetElements(STKObjects.eFacility).Count
for i in range(facCount):
    # Create Chain
    facName = scenario.Children.GetElements(STKObjects.eFacility).Item(i).InstanceName
    chain = scenario.Children.New(STKObjects.eChain, "{}ToSensorConst".format(facName))
    chain2 = chain.QueryInterface(STKObjects.IAgChain)

    # Add objects to the chain
    chain2.Objects.Add("Facility/{}".format(facName))
    chain2.Objects.Add("Constellation/SensorConst")

    # Get complete chain access data
    compAcc = chain.DataProviders.Item("Complete Access").QueryInterface(STKObjects.IAgDataPrvInterval).Exec(scenario2.StartTime, scenario2.StopTime)
    el = compAcc.DataSets.ElementNames
    numRows = compAcc.DataSets.RowCount
    maxOutage = []

    # Save out the report to a text file (file name and row extraction reconstructed; only the write call survived)
    dataFile = open("{}ToSensorConst.txt".format(facName), "w")
    for row in range(numRows):
        rowData = [compAcc.DataSets.Item(col).GetValues()[row] for col in range(4)]
        dataFile.write("{},{},{},{}\n".format(rowData[0], rowData[1], rowData[2], rowData[3]))
    dataFile.close()

    # Get StartTimes and StopTimes as lists
    startTimes = list(compAcc.DataSets.GetDataSetByName("Start Time").GetValues())
    stopTimes = list(compAcc.DataSets.GetDataSetByName("Stop Time").GetValues())

    # convert from strings to datetimes
    startDatetimes = np.array([dt.datetime.strptime(startTime[:-3], "%d %b %Y %H:%M:%S.%f") for startTime in startTimes])
    stopDatetimes = np.array([dt.datetime.strptime(stopTime[:-3], "%d %b %Y %H:%M:%S.%f") for stopTime in stopTimes])

    # Get max outage time for each chain, print to console and save to file
    outages = startDatetimes[1:] - stopDatetimes[:-1]
    maxOutage = np.amax(outages).total_seconds()
    start = stopTimes[np.argmax(outages)]
    stop = startTimes[np.argmax(outages) + 1]
    with open("MaxOutageData.txt", "a") as outageFile:
        outageFile.write("{},{},{},{}\n".format(facName, maxOutage, start, stop))  # output format reconstructed
        print("{}: max outage {} seconds, from {} to {}".format(facName, maxOutage, start, stop))
facCount = scenario.Children.GetElements(STKObjects.eFacility).Count for i in range(facCount): #Create Chain facName = scenario.Children.GetElements(STKObjects.eFacility).Item(i).InstanceName", "STK #Start the application, it will return a pointer to the Application Interface", "1: outageFile.write(\"{},NA,NA,NA\\n\".format(facName)) print(\"{}: No Outage\".format(facName)) else: #Get StartTimes and StopTimes as lists startTimes", "satellite sensor = newSat.Children.New(STKObjects.eSensor,\"Sensor{}{}\".format(i,j)) sensor2 = sensor.QueryInterface(STKObjects.IAgSensor) sensor2.CommonTasks.SetPatternSimpleConic(62.5, 2) #Add the sensor to", "09:33:16 2020 @author: jvergere Ideas: Something similar to the Iridium Constellation: 66 Sats", "pointer to the Application Interface app = CreateObject(\"STK12.Application\") #app = GetActiveObject(\"STK12.Application\") #app is", "\"1 Jun 2016 16:00:00.000\" scenario2.StopTime = \"2 Jun 2016 16:00:00.000\" root.Rewind() #Insert Facilites", "Initial State using TwoBody Propagator and Classical Orbital Elements keplarian = newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.ConvertTo(STKUtil.eOrbitStateClassical).QueryInterface(STKObjects.IAgOrbitStateClassical) keplarian.SizeShapeTpye", "will return a pointer to the Application Interface app = CreateObject(\"STK12.Application\") #app =", "list(compAcc.DataSets.GetDataSetByName(\"Start Time\").GetValues()) stopTimes = list(compAcc.DataSets.GetDataSetByName(\"Stop Time\").GetValues()) #convert to from strings to datetimes startDatetimes", "each plane \"\"\" import datetime as dt import numpy as np import os", "\"SetPosition */Facility/{} Geodetic {} {} Terrain\".format(facData[0], facData[2], facData[1]) root.ExecuteCommand(setPositionCmd) setColorCommand = \"Graphics */Facility/{}", "chain.QueryInterface(STKObjects.IAgChain) #Modify some display properties chain2.Graphics.Animation.Color = 65280 chain2.Graphics.Animation.LineWidth = STKObjects.e1 chain2.Graphics.Animation.IsHighlightVisible =", "Elements keplarian = newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.ConvertTo(STKUtil.eOrbitStateClassical).QueryInterface(STKObjects.IAgOrbitStateClassical) keplarian.SizeShapeTpye = STKObjects.eSizeShapeSemimajorAxis keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).SemiMajorAxis = 7159 keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).Eccentricity = 0", "i in range(facCount): #Create Chain facName = scenario.Children.GetElements(STKObjects.eFacility).Item(i).InstanceName chain = scenario.Children.New(STKObjects.eChain, \"{}ToSensorConst\".format(facName)) chain2", "GetActiveObject(\"STK12.Application\") #app is a pointer to IAgUiApplication #type info is available with python", "STKUtil.eDotted j+=1 i+=1 #Create a Chain object for each Facility to the constellation.", "import os #Need to cleanup this file before running each time, #or refactor", "Time\").GetValues()) stopTimes = list(compAcc.DataSets.GetDataSetByName(\"Stop Time\").GetValues()) #convert to from strings to datetimes startDatetimes =", "won't hurt to leave them there, but after running once they can be", "-> IAgScenario scenario2.StartTime = \"1 Jun 2016 16:00:00.000\" scenario2.StopTime = \"2 Jun 2016", "sensorConst2.Objects.Add(\"Satellite/Sat{0}{1}/Sensor/Sensor{0}{1}\".format(i,j)) #Adjust the translucenty of the sensor projections sensor2.VO.PercentTranslucency = 75 
sensor2.Graphics.LineStyle =", "Chain facName = scenario.Children.GetElements(STKObjects.eFacility).Item(i).InstanceName chain = scenario.Children.New(STKObjects.eChain, \"{}ToSensorConst\".format(facName)) chain2 = chain.QueryInterface(STKObjects.IAgChain) #Modify some", "Ideas: Something similar to the Iridium Constellation: 66 Sats 781 km (7159 semimajor", "some display properties chain2.Graphics.Animation.Color = 65280 chain2.Graphics.Animation.LineWidth = STKObjects.e1 chain2.Graphics.Animation.IsHighlightVisible = False #Add", "#app is a pointer to IAgUiApplication #type info is available with python builtin", "1 for trueAnomaly in range(0,360,45): # 8 sats per plane #insert satellite newSat", "root.NewScenario(\"NewTestScenario\") scenario = root.CurrentScenario #scenario -> IAgStkObject scenario2 = scenario.QueryInterface(STKObjects.IAgScenario) #scenaro2 -> IAgScenario", "this file before running each time, #or refactor code to avoid writing to", "newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Color = 65535 newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Line.Width = STKObjects.e1 newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Inherit = False newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesOrbit).IsGroundTrackVisible = False #Buildup", "in dir method, which will list #all the available properties and methods available", "as faclist: for line in faclist: facData = line.strip().split(\",\") insertNewFacCmd = \"New /", "text file using connect. Each line of the text file is #formatted: #FacName,Longitude,Latitude", "root = app.Personality2 #root ->IAgStkObjectRoot #These are not available to import until this", "root.ExecuteCommand(setColorCommand) #Create sensor constellation, used later to hold all the sensor objects sensorConst", "\"Graphics */Facility/{} SetColor blue\".format(facData[0]) root.ExecuteCommand(setColorCommand) #Create sensor constellation, used later to hold all", "dataFile.write(\"{},{},{},{}\\n\".format(el[0],el[1],el[2],el[3])) for row in range(numRows): rowData = compAcc.DataSets.GetRow(row) dataFile.write(\"{},{},{},{}\\n\".format(rowData[0],rowData[1],rowData[2],rowData[3])) dataFile.close() #Get max outage", "stopDatetimes = np.array([dt.datetime.strptime(stopTime[:-3], \"%d %b %Y %H:%M:%S.%f\") for stopTime in stopTimes]) outages =", "semimajor axis) 86.4 inclination 6 Orbit planes 30 degrees apart 11 in each", "= scenario.Children.New(STKObjects.eSatellite, \"Sat{}{}\".format(i,j)) newSat2 = newSat.QueryInterface(STKObjects.IAgSatellite) #change some basic display attributes newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Color =", "= CreateObject(\"STK12.Application\") #app = GetActiveObject(\"STK12.Application\") #app is a pointer to IAgUiApplication #type info", "the top with all the other import statements from comtypes.gen import STKUtil from", "of the sensor projections sensor2.VO.PercentTranslucency = 75 sensor2.Graphics.LineStyle = STKUtil.eDotted j+=1 i+=1 #Create", "running instance of STK #Start the application, it will return a pointer to", "comtypes.gen import STKObjects root.NewScenario(\"NewTestScenario\") scenario = root.CurrentScenario #scenario -> IAgStkObject scenario2 = scenario.QueryInterface(STKObjects.IAgScenario)", "each satellite sensor = newSat.Children.New(STKObjects.eSensor,\"Sensor{}{}\".format(i,j)) sensor2 = sensor.QueryInterface(STKObjects.IAgSensor) 
sensor2.CommonTasks.SetPatternSimpleConic(62.5, 2) #Add the sensor", "plane \"\"\" import datetime as dt import numpy as np import os #Need", "# Will allow you to launch STK #from comtypes.client import GetActiveObject #Will allow", "#Insert Facilites from text file using connect. Each line of the text file", "= False #Buildup Initial State using TwoBody Propagator and Classical Orbital Elements keplarian", "np.array([dt.datetime.strptime(startTime[:-3], \"%d %b %Y %H:%M:%S.%f\") for startTime in startTimes]) stopDatetimes = np.array([dt.datetime.strptime(stopTime[:-3], \"%d", "comtypes.client import GetActiveObject #Will allow you to connect a running instance of STK", "TrueAnomalies for every other orbital plane newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.Assign(keplarian) newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).Propagate() #Attach sensors to each satellite", "sensors to each satellite sensor = newSat.Children.New(STKObjects.eSensor,\"Sensor{}{}\".format(i,j)) sensor2 = sensor.QueryInterface(STKObjects.IAgSensor) sensor2.CommonTasks.SetPatternSimpleConic(62.5, 2) #Add", "STKObjects.e1 chain2.Graphics.Animation.IsHighlightVisible = False #Add objects to the chain chain2.Objects.Add(\"Facility/{}\".format(facName)) chain2.Objects.Add(\"Constellation/SensorConst\") #Get complete", "cleanup this file before running each time, #or refactor code to avoid writing", "sensor to constellation object i = 1 for RAAN in range(0,180,45): # 4", "if this is the first time #running STK via COM with python....it won't", "the python builtin help #help(app) app.Visible = True app.UserControl = True root =", "display properties chain2.Graphics.Animation.Color = 65280 chain2.Graphics.Animation.LineWidth = STKObjects.e1 chain2.Graphics.Animation.IsHighlightVisible = False #Add objects", "for row in range(numRows): rowData = compAcc.DataSets.GetRow(row) dataFile.write(\"{},{},{},{}\\n\".format(rowData[0],rowData[1],rowData[2],rowData[3])) dataFile.close() #Get max outage time", "%Y %H:%M:%S.%f\") for stopTime in stopTimes]) outages = startDatetimes[1:] - stopDatetimes[:-1] maxOutage =", "orbit planes j = 1 for trueAnomaly in range(0,360,45): # 8 sats per", "i = 1 for RAAN in range(0,180,45): # 4 orbit planes j =", "a pointer to IAgUiApplication #type info is available with python builtin type method", "#formatted: #FacName,Longitude,Latitude with open(\"Facilities.txt\", \"r\") as faclist: for line in faclist: facData =", "blue\".format(facData[0]) root.ExecuteCommand(setColorCommand) #Create sensor constellation, used later to hold all the sensor objects", "keplarian.Orientation.AscNode.QueryInterface(STKObjects.IAgOrientationAscNodeRAAN).Value = RAAN keplarian.LocationType = STKObjects.eLocationTrueAnomaly keplarian.Location.QueryInterface(STKObjects.IAgClassicalLocationTrueAnomaly).Value = trueAnomaly + (45/2)*(i%2) #Stagger TrueAnomalies", "keplarian.SizeShapeTpye = STKObjects.eSizeShapeSemimajorAxis keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).SemiMajorAxis = 7159 keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).Eccentricity = 0 keplarian.Orientation.Inclination = 86.4 keplarian.Orientation.ArgOfPerigee", "%b %Y %H:%M:%S.%f\") for stopTime in stopTimes]) outages = startDatetimes[1:] - stopDatetimes[:-1] maxOutage", "this is the first time #running STK via COM with python....it won't hurt", "= stopTimes[np.argmax(outages)] stop = 
startTimes[np.argmax(outages)+1] outageFile.write(\"{},{},{},{}\\n\".format(facName,maxOutage,start,stop)) print(\"{}: {} seconds from {} until {}\".format(facName,", "jvergere Ideas: Something similar to the Iridium Constellation: 66 Sats 781 km (7159", "the sensor objects sensorConst = scenario.Children.New(STKObjects.eConstellation, \"SensorConst\") sensorConst2 = sensorConst.QueryInterface(STKObjects.IAgConstellation) #Build satellite constellation,", "newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).Propagate() #Attach sensors to each satellite sensor = newSat.Children.New(STKObjects.eSensor,\"Sensor{}{}\".format(i,j)) sensor2 = sensor.QueryInterface(STKObjects.IAgSensor) sensor2.CommonTasks.SetPatternSimpleConic(62.5,", "help #help(app) app.Visible = True app.UserControl = True root = app.Personality2 #root ->IAgStkObjectRoot", "setPositionCmd = \"SetPosition */Facility/{} Geodetic {} {} Terrain\".format(facData[0], facData[2], facData[1]) root.ExecuteCommand(setPositionCmd) setColorCommand =", "in range(0,360,45): # 8 sats per plane #insert satellite newSat = scenario.Children.New(STKObjects.eSatellite, \"Sat{}{}\".format(i,j))", "to file in append mode if os.path.exists(\"MaxOutageData.txt\"): os.remove(\"MaxOutageData.txt\") from comtypes.client import CreateObject #", "app.UserControl = True root = app.Personality2 #root ->IAgStkObjectRoot #These are not available to", "app.Personality2 #root ->IAgStkObjectRoot #These are not available to import until this point if", "[] #Save out the report to a text file with open(\"{}CompleteChainAccess.txt\".format(facName),\"w\") as dataFile:", "Time\").GetValues()) #convert to from strings to datetimes startDatetimes = np.array([dt.datetime.strptime(startTime[:-3], \"%d %b %Y", "7159 keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).Eccentricity = 0 keplarian.Orientation.Inclination = 86.4 keplarian.Orientation.ArgOfPerigee = 0 keplarian.Orientation.AscNodeType = STKObjects.eAscNodeRAAN", "#Adjust the translucenty of the sensor projections sensor2.VO.PercentTranslucency = 75 sensor2.Graphics.LineStyle = STKUtil.eDotted", "attributes newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Color = 65535 newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Line.Width = STKObjects.e1 newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Inherit = False newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesOrbit).IsGroundTrackVisible = False", "30 degrees apart 11 in each plane \"\"\" import datetime as dt import", "for each Facility to the constellation. 
facCount = scenario.Children.GetElements(STKObjects.eFacility).Count for i in range(facCount):", "= False newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesOrbit).IsGroundTrackVisible = False #Buildup Initial State using TwoBody Propagator and Classical", "Created on Mon May 4 09:33:16 2020 @author: jvergere Ideas: Something similar to", "for stopTime in stopTimes]) outages = startDatetimes[1:] - stopDatetimes[:-1] maxOutage = np.amax(outages).total_seconds() start", "86.4 inclination 6 Orbit planes 30 degrees apart 11 in each plane \"\"\"", "%H:%M:%S.%f\") for startTime in startTimes]) stopDatetimes = np.array([dt.datetime.strptime(stopTime[:-3], \"%d %b %Y %H:%M:%S.%f\") for", "4 orbit planes j = 1 for trueAnomaly in range(0,360,45): # 8 sats", "with python....it won't hurt to leave them there, but after running once they", "State using TwoBody Propagator and Classical Orbital Elements keplarian = newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.ConvertTo(STKUtil.eOrbitStateClassical).QueryInterface(STKObjects.IAgOrbitStateClassical) keplarian.SizeShapeTpye =", "Will allow you to launch STK #from comtypes.client import GetActiveObject #Will allow you", "for line in faclist: facData = line.strip().split(\",\") insertNewFacCmd = \"New / */Facility {}\".format(facData[0])", "dataFile.close() #Get max outage time for each chain, print to console and save", "not available to import until this point if this is the first time", "= chain.QueryInterface(STKObjects.IAgChain) #Modify some display properties chain2.Graphics.Animation.Color = 65280 chain2.Graphics.Animation.LineWidth = STKObjects.e1 chain2.Graphics.Animation.IsHighlightVisible", "the report to a text file with open(\"{}CompleteChainAccess.txt\".format(facName),\"w\") as dataFile: dataFile.write(\"{},{},{},{}\\n\".format(el[0],el[1],el[2],el[3])) for row", "report to a text file with open(\"{}CompleteChainAccess.txt\".format(facName),\"w\") as dataFile: dataFile.write(\"{},{},{},{}\\n\".format(el[0],el[1],el[2],el[3])) for row in", "= 0 keplarian.Orientation.Inclination = 86.4 keplarian.Orientation.ArgOfPerigee = 0 keplarian.Orientation.AscNodeType = STKObjects.eAscNodeRAAN keplarian.Orientation.AscNode.QueryInterface(STKObjects.IAgOrientationAscNodeRAAN).Value =", "# 4 orbit planes j = 1 for trueAnomaly in range(0,360,45): # 8", "= sensor.QueryInterface(STKObjects.IAgSensor) sensor2.CommonTasks.SetPatternSimpleConic(62.5, 2) #Add the sensor to the SensorConstellation sensorConst2.Objects.Add(\"Satellite/Sat{0}{1}/Sensor/Sensor{0}{1}\".format(i,j)) #Adjust the", "to the Application Interface app = CreateObject(\"STK12.Application\") #app = GetActiveObject(\"STK12.Application\") #app is a", "sensor constellation, used later to hold all the sensor objects sensorConst = scenario.Children.New(STKObjects.eConstellation,", "objects to the chain chain2.Objects.Add(\"Facility/{}\".format(facName)) chain2.Objects.Add(\"Constellation/SensorConst\") #Get complete chain access data compAcc =", "#type info is available with python builtin type method #type(app) #More info is", "is the first time #running STK via COM with python....it won't hurt to", "in append mode if os.path.exists(\"MaxOutageData.txt\"): os.remove(\"MaxOutageData.txt\") from comtypes.client import CreateObject # Will allow", "orbital plane newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.Assign(keplarian) 
newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).Propagate() #Attach sensors to each satellite sensor = newSat.Children.New(STKObjects.eSensor,\"Sensor{}{}\".format(i,j)) sensor2", "Interface app = CreateObject(\"STK12.Application\") #app = GetActiveObject(\"STK12.Application\") #app is a pointer to IAgUiApplication", "connect. Each line of the text file is #formatted: #FacName,Longitude,Latitude with open(\"Facilities.txt\", \"r\")", "= STKObjects.e1 newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Inherit = False newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesOrbit).IsGroundTrackVisible = False #Buildup Initial State using TwoBody", "scenario.Children.New(STKObjects.eConstellation, \"SensorConst\") sensorConst2 = sensorConst.QueryInterface(STKObjects.IAgConstellation) #Build satellite constellation, attach sensors, assign sensor to", "#Stagger TrueAnomalies for every other orbital plane newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.Assign(keplarian) newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).Propagate() #Attach sensors to each", "allow you to launch STK #from comtypes.client import GetActiveObject #Will allow you to", "chain = scenario.Children.New(STKObjects.eChain, \"{}ToSensorConst\".format(facName)) chain2 = chain.QueryInterface(STKObjects.IAgChain) #Modify some display properties chain2.Graphics.Animation.Color =", "= 1 for trueAnomaly in range(0,360,45): # 8 sats per plane #insert satellite", "16:00:00.000\" root.Rewind() #Insert Facilites from text file using connect. Each line of the", "of STK #Start the application, it will return a pointer to the Application", "for RAAN in range(0,180,45): # 4 orbit planes j = 1 for trueAnomaly", "= newSat.QueryInterface(STKObjects.IAgSatellite) #change some basic display attributes newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Color = 65535 newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Line.Width = STKObjects.e1", "16:00:00.000\" scenario2.StopTime = \"2 Jun 2016 16:00:00.000\" root.Rewind() #Insert Facilites from text file", "chain2 = chain.QueryInterface(STKObjects.IAgChain) #Modify some display properties chain2.Graphics.Animation.Color = 65280 chain2.Graphics.Animation.LineWidth = STKObjects.e1", "= \"Graphics */Facility/{} SetColor blue\".format(facData[0]) root.ExecuteCommand(setColorCommand) #Create sensor constellation, used later to hold", "constellation object i = 1 for RAAN in range(0,180,45): # 4 orbit planes", "chain2.Graphics.Animation.Color = 65280 chain2.Graphics.Animation.LineWidth = STKObjects.e1 chain2.Graphics.Animation.IsHighlightVisible = False #Add objects to the", "startTimes[np.argmax(outages)+1] outageFile.write(\"{},{},{},{}\\n\".format(facName,maxOutage,start,stop)) print(\"{}: {} seconds from {} until {}\".format(facName, maxOutage, start, stop)) root.Rewind()", "faclist: for line in faclist: facData = line.strip().split(\",\") insertNewFacCmd = \"New / */Facility", "app.Visible = True app.UserControl = True root = app.Personality2 #root ->IAgStkObjectRoot #These are", "the first time #running STK via COM with python....it won't hurt to leave", "#Create Chain facName = scenario.Children.GetElements(STKObjects.eFacility).Item(i).InstanceName chain = scenario.Children.New(STKObjects.eChain, \"{}ToSensorConst\".format(facName)) chain2 = chain.QueryInterface(STKObjects.IAgChain) #Modify", "#type(app) 
#More info is available via python built in dir method, which will", "allow you to connect a running instance of STK #Start the application, it", "= 65535 newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Line.Width = STKObjects.e1 newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Inherit = False newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesOrbit).IsGroundTrackVisible = False #Buildup Initial", "for every other orbital plane newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.Assign(keplarian) newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).Propagate() #Attach sensors to each satellite sensor", "python builtin help #help(app) app.Visible = True app.UserControl = True root = app.Personality2", "{} Terrain\".format(facData[0], facData[2], facData[1]) root.ExecuteCommand(setPositionCmd) setColorCommand = \"Graphics */Facility/{} SetColor blue\".format(facData[0]) root.ExecuteCommand(setColorCommand) #Create", "#Buildup Initial State using TwoBody Propagator and Classical Orbital Elements keplarian = newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.ConvertTo(STKUtil.eOrbitStateClassical).QueryInterface(STKObjects.IAgOrbitStateClassical)", "stopDatetimes[:-1] maxOutage = np.amax(outages).total_seconds() start = stopTimes[np.argmax(outages)] stop = startTimes[np.argmax(outages)+1] outageFile.write(\"{},{},{},{}\\n\".format(facName,maxOutage,start,stop)) print(\"{}: {}", "= startTimes[np.argmax(outages)+1] outageFile.write(\"{},{},{},{}\\n\".format(facName,maxOutage,start,stop)) print(\"{}: {} seconds from {} until {}\".format(facName, maxOutage, start, stop))", "for trueAnomaly in range(0,360,45): # 8 sats per plane #insert satellite newSat =", "file using connect. 
Each line of the text file is #formatted: #FacName,Longitude,Latitude with", "import GetActiveObject #Will allow you to connect a running instance of STK #Start", "facData[2], facData[1]) root.ExecuteCommand(setPositionCmd) setColorCommand = \"Graphics */Facility/{} SetColor blue\".format(facData[0]) root.ExecuteCommand(setColorCommand) #Create sensor constellation,", "every other orbital plane newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.Assign(keplarian) newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).Propagate() #Attach sensors to each satellite sensor =", "Classical Orbital Elements keplarian = newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.ConvertTo(STKUtil.eOrbitStateClassical).QueryInterface(STKObjects.IAgOrbitStateClassical) keplarian.SizeShapeTpye = STKObjects.eSizeShapeSemimajorAxis keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).SemiMajorAxis = 7159 keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).Eccentricity", "hurt to leave them there, but after running once they can be #included", "axis) 86.4 inclination 6 Orbit planes 30 degrees apart 11 in each plane", "= scenario.Children.GetElements(STKObjects.eFacility).Count for i in range(facCount): #Create Chain facName = scenario.Children.GetElements(STKObjects.eFacility).Item(i).InstanceName chain =", "= True app.UserControl = True root = app.Personality2 #root ->IAgStkObjectRoot #These are not", "os.remove(\"MaxOutageData.txt\") from comtypes.client import CreateObject # Will allow you to launch STK #from", "if os.path.exists(\"MaxOutageData.txt\"): os.remove(\"MaxOutageData.txt\") from comtypes.client import CreateObject # Will allow you to launch", "= \"1 Jun 2016 16:00:00.000\" scenario2.StopTime = \"2 Jun 2016 16:00:00.000\" root.Rewind() #Insert", "before running each time, #or refactor code to avoid writing to file in", "line.strip().split(\",\") insertNewFacCmd = \"New / */Facility {}\".format(facData[0]) root.ExecuteCommand(insertNewFacCmd) setPositionCmd = \"SetPosition */Facility/{} Geodetic", "0 keplarian.Orientation.Inclination = 86.4 keplarian.Orientation.ArgOfPerigee = 0 keplarian.Orientation.AscNodeType = STKObjects.eAscNodeRAAN keplarian.Orientation.AscNode.QueryInterface(STKObjects.IAgOrientationAscNodeRAAN).Value = RAAN", "#Modify some display properties chain2.Graphics.Animation.Color = 65280 chain2.Graphics.Animation.LineWidth = STKObjects.e1 chain2.Graphics.Animation.IsHighlightVisible = False", "= 65280 chain2.Graphics.Animation.LineWidth = STKObjects.e1 chain2.Graphics.Animation.IsHighlightVisible = False #Add objects to the chain", "max outage time for each chain, print to console and save to file", "IAgScenario scenario2.StartTime = \"1 Jun 2016 16:00:00.000\" scenario2.StopTime = \"2 Jun 2016 16:00:00.000\"", "to cleanup this file before running each time, #or refactor code to avoid", "to file with open(\"MaxOutageData.txt\", \"a\") as outageFile: if numRows == 1: outageFile.write(\"{},NA,NA,NA\\n\".format(facName)) print(\"{}:", "builtin help #help(app) app.Visible = True app.UserControl = True root = app.Personality2 #root", "info is available via python built in dir method, which will list #all", "+ (45/2)*(i%2) #Stagger TrueAnomalies for every other orbital plane newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.Assign(keplarian) 
newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).Propagate() #Attach sensors", "instance of STK #Start the application, it will return a pointer to the", "import STKObjects root.NewScenario(\"NewTestScenario\") scenario = root.CurrentScenario #scenario -> IAgStkObject scenario2 = scenario.QueryInterface(STKObjects.IAgScenario) #scenaro2", "STKObjects root.NewScenario(\"NewTestScenario\") scenario = root.CurrentScenario #scenario -> IAgStkObject scenario2 = scenario.QueryInterface(STKObjects.IAgScenario) #scenaro2 ->", "\"%d %b %Y %H:%M:%S.%f\") for stopTime in stopTimes]) outages = startDatetimes[1:] - stopDatetimes[:-1]", "Geodetic {} {} Terrain\".format(facData[0], facData[2], facData[1]) root.ExecuteCommand(setPositionCmd) setColorCommand = \"Graphics */Facility/{} SetColor blue\".format(facData[0])", "print(\"{}: No Outage\".format(facName)) else: #Get StartTimes and StopTimes as lists startTimes = list(compAcc.DataSets.GetDataSetByName(\"Start", "StopTimes as lists startTimes = list(compAcc.DataSets.GetDataSetByName(\"Start Time\").GetValues()) stopTimes = list(compAcc.DataSets.GetDataSetByName(\"Stop Time\").GetValues()) #convert to", "stopTime in stopTimes]) outages = startDatetimes[1:] - stopDatetimes[:-1] maxOutage = np.amax(outages).total_seconds() start =", "j = 1 for trueAnomaly in range(0,360,45): # 8 sats per plane #insert", "scenario2 = scenario.QueryInterface(STKObjects.IAgScenario) #scenaro2 -> IAgScenario scenario2.StartTime = \"1 Jun 2016 16:00:00.000\" scenario2.StopTime", "row in range(numRows): rowData = compAcc.DataSets.GetRow(row) dataFile.write(\"{},{},{},{}\\n\".format(rowData[0],rowData[1],rowData[2],rowData[3])) dataFile.close() #Get max outage time for", "= scenario.QueryInterface(STKObjects.IAgScenario) #scenaro2 -> IAgScenario scenario2.StartTime = \"1 Jun 2016 16:00:00.000\" scenario2.StopTime =", "newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Inherit = False newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesOrbit).IsGroundTrackVisible = False #Buildup Initial State using TwoBody Propagator and", "facName = scenario.Children.GetElements(STKObjects.eFacility).Item(i).InstanceName chain = scenario.Children.New(STKObjects.eChain, \"{}ToSensorConst\".format(facName)) chain2 = chain.QueryInterface(STKObjects.IAgChain) #Modify some display", "outage time for each chain, print to console and save to file with", "scenario.QueryInterface(STKObjects.IAgScenario) #scenaro2 -> IAgScenario scenario2.StartTime = \"1 Jun 2016 16:00:00.000\" scenario2.StopTime = \"2", "each chain, print to console and save to file with open(\"MaxOutageData.txt\", \"a\") as", "apart 11 in each plane \"\"\" import datetime as dt import numpy as", "satellite newSat = scenario.Children.New(STKObjects.eSatellite, \"Sat{}{}\".format(i,j)) newSat2 = newSat.QueryInterface(STKObjects.IAgSatellite) #change some basic display attributes", "python built in dir method, which will list #all the available properties and", "stopTimes]) outages = startDatetimes[1:] - stopDatetimes[:-1] maxOutage = np.amax(outages).total_seconds() start = stopTimes[np.argmax(outages)] stop", "in stopTimes]) outages = startDatetimes[1:] - stopDatetimes[:-1] maxOutage = np.amax(outages).total_seconds() start = stopTimes[np.argmax(outages)]", "= scenario.Children.New(STKObjects.eChain, \"{}ToSensorConst\".format(facName)) chain2 = chain.QueryInterface(STKObjects.IAgChain) #Modify some display properties chain2.Graphics.Animation.Color = 65280", 
"StartTimes and StopTimes as lists startTimes = list(compAcc.DataSets.GetDataSetByName(\"Start Time\").GetValues()) stopTimes = list(compAcc.DataSets.GetDataSetByName(\"Stop Time\").GetValues())", "\"%d %b %Y %H:%M:%S.%f\") for startTime in startTimes]) stopDatetimes = np.array([dt.datetime.strptime(stopTime[:-3], \"%d %b", "\"{}ToSensorConst\".format(facName)) chain2 = chain.QueryInterface(STKObjects.IAgChain) #Modify some display properties chain2.Graphics.Animation.Color = 65280 chain2.Graphics.Animation.LineWidth =", "keplarian.Location.QueryInterface(STKObjects.IAgClassicalLocationTrueAnomaly).Value = trueAnomaly + (45/2)*(i%2) #Stagger TrueAnomalies for every other orbital plane newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.Assign(keplarian)", "range(0,360,45): # 8 sats per plane #insert satellite newSat = scenario.Children.New(STKObjects.eSatellite, \"Sat{}{}\".format(i,j)) newSat2", "via python built in dir method, which will list #all the available properties", "import CreateObject # Will allow you to launch STK #from comtypes.client import GetActiveObject", "outages = startDatetimes[1:] - stopDatetimes[:-1] maxOutage = np.amax(outages).total_seconds() start = stopTimes[np.argmax(outages)] stop =", "#Add objects to the chain chain2.Objects.Add(\"Facility/{}\".format(facName)) chain2.Objects.Add(\"Constellation/SensorConst\") #Get complete chain access data compAcc", "as dt import numpy as np import os #Need to cleanup this file", "#included at the top with all the other import statements from comtypes.gen import", "= newSat.Children.New(STKObjects.eSensor,\"Sensor{}{}\".format(i,j)) sensor2 = sensor.QueryInterface(STKObjects.IAgSensor) sensor2.CommonTasks.SetPatternSimpleConic(62.5, 2) #Add the sensor to the SensorConstellation", "dt import numpy as np import os #Need to cleanup this file before", "used later to hold all the sensor objects sensorConst = scenario.Children.New(STKObjects.eConstellation, \"SensorConst\") sensorConst2", "hold all the sensor objects sensorConst = scenario.Children.New(STKObjects.eConstellation, \"SensorConst\") sensorConst2 = sensorConst.QueryInterface(STKObjects.IAgConstellation) #Build", "#Create sensor constellation, used later to hold all the sensor objects sensorConst =", "#all the available properties and methods available #dir(app) #Additional useful information is available", "keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).Eccentricity = 0 keplarian.Orientation.Inclination = 86.4 keplarian.Orientation.ArgOfPerigee = 0 keplarian.Orientation.AscNodeType = STKObjects.eAscNodeRAAN keplarian.Orientation.AscNode.QueryInterface(STKObjects.IAgOrientationAscNodeRAAN).Value", "sensor projections sensor2.VO.PercentTranslucency = 75 sensor2.Graphics.LineStyle = STKUtil.eDotted j+=1 i+=1 #Create a Chain", "to the SensorConstellation sensorConst2.Objects.Add(\"Satellite/Sat{0}{1}/Sensor/Sensor{0}{1}\".format(i,j)) #Adjust the translucenty of the sensor projections sensor2.VO.PercentTranslucency =", "*/Facility {}\".format(facData[0]) root.ExecuteCommand(insertNewFacCmd) setPositionCmd = \"SetPosition */Facility/{} Geodetic {} {} Terrain\".format(facData[0], facData[2], facData[1])", "outageFile.write(\"{},{},{},{}\\n\".format(facName,maxOutage,start,stop)) print(\"{}: {} seconds from {} until {}\".format(facName, maxOutage, start, stop)) root.Rewind() root.Save()", "open(\"{}CompleteChainAccess.txt\".format(facName),\"w\") as dataFile: 
dataFile.write(\"{},{},{},{}\\n\".format(el[0],el[1],el[2],el[3])) for row in range(numRows): rowData = compAcc.DataSets.GetRow(row) dataFile.write(\"{},{},{},{}\\n\".format(rowData[0],rowData[1],rowData[2],rowData[3])) dataFile.close()", "app = CreateObject(\"STK12.Application\") #app = GetActiveObject(\"STK12.Application\") #app is a pointer to IAgUiApplication #type", "= scenario.Children.GetElements(STKObjects.eFacility).Item(i).InstanceName chain = scenario.Children.New(STKObjects.eChain, \"{}ToSensorConst\".format(facName)) chain2 = chain.QueryInterface(STKObjects.IAgChain) #Modify some display properties", "the Application Interface app = CreateObject(\"STK12.Application\") #app = GetActiveObject(\"STK12.Application\") #app is a pointer", "numRows = compAcc.DataSets.RowCount maxOutage = [] #Save out the report to a text", "useful information is available via the python builtin help #help(app) app.Visible = True", "return a pointer to the Application Interface app = CreateObject(\"STK12.Application\") #app = GetActiveObject(\"STK12.Application\")", "to connect a running instance of STK #Start the application, it will return", "data compAcc = chain.DataProviders.Item(\"Complete Access\").QueryInterface(STKObjects.IAgDataPrvInterval).Exec(scenario2.StartTime,scenario2.StopTime) el = compAcc.DataSets.ElementNames numRows = compAcc.DataSets.RowCount maxOutage =", "sensor.QueryInterface(STKObjects.IAgSensor) sensor2.CommonTasks.SetPatternSimpleConic(62.5, 2) #Add the sensor to the SensorConstellation sensorConst2.Objects.Add(\"Satellite/Sat{0}{1}/Sensor/Sensor{0}{1}\".format(i,j)) #Adjust the translucenty", "compAcc.DataSets.RowCount maxOutage = [] #Save out the report to a text file with", "available properties and methods available #dir(app) #Additional useful information is available via the", "= line.strip().split(\",\") insertNewFacCmd = \"New / */Facility {}\".format(facData[0]) root.ExecuteCommand(insertNewFacCmd) setPositionCmd = \"SetPosition */Facility/{}", "CreateObject # Will allow you to launch STK #from comtypes.client import GetActiveObject #Will", "using connect. 
Each line of the text file is #formatted: #FacName,Longitude,Latitude with open(\"Facilities.txt\",", "{} {} Terrain\".format(facData[0], facData[2], facData[1]) root.ExecuteCommand(setPositionCmd) setColorCommand = \"Graphics */Facility/{} SetColor blue\".format(facData[0]) root.ExecuteCommand(setColorCommand)", "insertNewFacCmd = \"New / */Facility {}\".format(facData[0]) root.ExecuteCommand(insertNewFacCmd) setPositionCmd = \"SetPosition */Facility/{} Geodetic {}", "65280 chain2.Graphics.Animation.LineWidth = STKObjects.e1 chain2.Graphics.Animation.IsHighlightVisible = False #Add objects to the chain chain2.Objects.Add(\"Facility/{}\".format(facName))", "refactor code to avoid writing to file in append mode if os.path.exists(\"MaxOutageData.txt\"): os.remove(\"MaxOutageData.txt\")", "on Mon May 4 09:33:16 2020 @author: jvergere Ideas: Something similar to the", "#running STK via COM with python....it won't hurt to leave them there, but", "6 Orbit planes 30 degrees apart 11 in each plane \"\"\" import datetime", "= 7159 keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).Eccentricity = 0 keplarian.Orientation.Inclination = 86.4 keplarian.Orientation.ArgOfPerigee = 0 keplarian.Orientation.AscNodeType =", "setColorCommand = \"Graphics */Facility/{} SetColor blue\".format(facData[0]) root.ExecuteCommand(setColorCommand) #Create sensor constellation, used later to", "#convert to from strings to datetimes startDatetimes = np.array([dt.datetime.strptime(startTime[:-3], \"%d %b %Y %H:%M:%S.%f\")", "#Get max outage time for each chain, print to console and save to", "root.ExecuteCommand(setPositionCmd) setColorCommand = \"Graphics */Facility/{} SetColor blue\".format(facData[0]) root.ExecuteCommand(setColorCommand) #Create sensor constellation, used later", "sensor2 = sensor.QueryInterface(STKObjects.IAgSensor) sensor2.CommonTasks.SetPatternSimpleConic(62.5, 2) #Add the sensor to the SensorConstellation sensorConst2.Objects.Add(\"Satellite/Sat{0}{1}/Sensor/Sensor{0}{1}\".format(i,j)) #Adjust", "until this point if this is the first time #running STK via COM", "statements from comtypes.gen import STKUtil from comtypes.gen import STKObjects root.NewScenario(\"NewTestScenario\") scenario = root.CurrentScenario", "line of the text file is #formatted: #FacName,Longitude,Latitude with open(\"Facilities.txt\", \"r\") as faclist:", "newSat = scenario.Children.New(STKObjects.eSatellite, \"Sat{}{}\".format(i,j)) newSat2 = newSat.QueryInterface(STKObjects.IAgSatellite) #change some basic display attributes newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Color", "newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Line.Width = STKObjects.e1 newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Inherit = False newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesOrbit).IsGroundTrackVisible = False #Buildup Initial State using", "chain access data compAcc = chain.DataProviders.Item(\"Complete Access\").QueryInterface(STKObjects.IAgDataPrvInterval).Exec(scenario2.StartTime,scenario2.StopTime) el = compAcc.DataSets.ElementNames numRows = compAcc.DataSets.RowCount", "sensor2.Graphics.LineStyle = STKUtil.eDotted j+=1 i+=1 #Create a Chain object for each Facility to", "= 75 sensor2.Graphics.LineStyle = STKUtil.eDotted j+=1 i+=1 #Create a Chain object for each", "newSat.QueryInterface(STKObjects.IAgSatellite) #change some basic display attributes 
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Color = 65535 newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Line.Width = STKObjects.e1 newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Inherit", "numRows == 1: outageFile.write(\"{},NA,NA,NA\\n\".format(facName)) print(\"{}: No Outage\".format(facName)) else: #Get StartTimes and StopTimes as", "sensor objects sensorConst = scenario.Children.New(STKObjects.eConstellation, \"SensorConst\") sensorConst2 = sensorConst.QueryInterface(STKObjects.IAgConstellation) #Build satellite constellation, attach", "-> IAgStkObject scenario2 = scenario.QueryInterface(STKObjects.IAgScenario) #scenaro2 -> IAgScenario scenario2.StartTime = \"1 Jun 2016", "application, it will return a pointer to the Application Interface app = CreateObject(\"STK12.Application\")", "as lists startTimes = list(compAcc.DataSets.GetDataSetByName(\"Start Time\").GetValues()) stopTimes = list(compAcc.DataSets.GetDataSetByName(\"Stop Time\").GetValues()) #convert to from", "object for each Facility to the constellation. facCount = scenario.Children.GetElements(STKObjects.eFacility).Count for i in", "chain2.Graphics.Animation.IsHighlightVisible = False #Add objects to the chain chain2.Objects.Add(\"Facility/{}\".format(facName)) chain2.Objects.Add(\"Constellation/SensorConst\") #Get complete chain", "75 sensor2.Graphics.LineStyle = STKUtil.eDotted j+=1 i+=1 #Create a Chain object for each Facility", "Orbital Elements keplarian = newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.ConvertTo(STKUtil.eOrbitStateClassical).QueryInterface(STKObjects.IAgOrbitStateClassical) keplarian.SizeShapeTpye = STKObjects.eSizeShapeSemimajorAxis keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).SemiMajorAxis = 7159 keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).Eccentricity =", "%H:%M:%S.%f\") for stopTime in stopTimes]) outages = startDatetimes[1:] - stopDatetimes[:-1] maxOutage = np.amax(outages).total_seconds()", "facData = line.strip().split(\",\") insertNewFacCmd = \"New / */Facility {}\".format(facData[0]) root.ExecuteCommand(insertNewFacCmd) setPositionCmd = \"SetPosition", "= np.array([dt.datetime.strptime(stopTime[:-3], \"%d %b %Y %H:%M:%S.%f\") for stopTime in stopTimes]) outages = startDatetimes[1:]", "to avoid writing to file in append mode if os.path.exists(\"MaxOutageData.txt\"): os.remove(\"MaxOutageData.txt\") from comtypes.client", "facCount = scenario.Children.GetElements(STKObjects.eFacility).Count for i in range(facCount): #Create Chain facName = scenario.Children.GetElements(STKObjects.eFacility).Item(i).InstanceName chain", "the application, it will return a pointer to the Application Interface app =", "\"Sat{}{}\".format(i,j)) newSat2 = newSat.QueryInterface(STKObjects.IAgSatellite) #change some basic display attributes newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Color = 65535 newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Line.Width", "the sensor projections sensor2.VO.PercentTranslucency = 75 sensor2.Graphics.LineStyle = STKUtil.eDotted j+=1 i+=1 #Create a", "built in dir method, which will list #all the available properties and methods", "the constellation. 
facCount = scenario.Children.GetElements(STKObjects.eFacility).Count for i in range(facCount): #Create Chain facName =", "newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.Assign(keplarian) newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).Propagate() #Attach sensors to each satellite sensor = newSat.Children.New(STKObjects.eSensor,\"Sensor{}{}\".format(i,j)) sensor2 = sensor.QueryInterface(STKObjects.IAgSensor)", "line in faclist: facData = line.strip().split(\",\") insertNewFacCmd = \"New / */Facility {}\".format(facData[0]) root.ExecuteCommand(insertNewFacCmd)", "open(\"MaxOutageData.txt\", \"a\") as outageFile: if numRows == 1: outageFile.write(\"{},NA,NA,NA\\n\".format(facName)) print(\"{}: No Outage\".format(facName)) else:", "planes j = 1 for trueAnomaly in range(0,360,45): # 8 sats per plane", "Iridium Constellation: 66 Sats 781 km (7159 semimajor axis) 86.4 inclination 6 Orbit", "as dataFile: dataFile.write(\"{},{},{},{}\\n\".format(el[0],el[1],el[2],el[3])) for row in range(numRows): rowData = compAcc.DataSets.GetRow(row) dataFile.write(\"{},{},{},{}\\n\".format(rowData[0],rowData[1],rowData[2],rowData[3])) dataFile.close() #Get", "sats per plane #insert satellite newSat = scenario.Children.New(STKObjects.eSatellite, \"Sat{}{}\".format(i,j)) newSat2 = newSat.QueryInterface(STKObjects.IAgSatellite) #change", "a pointer to the Application Interface app = CreateObject(\"STK12.Application\") #app = GetActiveObject(\"STK12.Application\") #app", "via COM with python....it won't hurt to leave them there, but after running", "= \"2 Jun 2016 16:00:00.000\" root.Rewind() #Insert Facilites from text file using connect.", "append mode if os.path.exists(\"MaxOutageData.txt\"): os.remove(\"MaxOutageData.txt\") from comtypes.client import CreateObject # Will allow you", "with open(\"Facilities.txt\", \"r\") as faclist: for line in faclist: facData = line.strip().split(\",\") insertNewFacCmd", "= \"New / */Facility {}\".format(facData[0]) root.ExecuteCommand(insertNewFacCmd) setPositionCmd = \"SetPosition */Facility/{} Geodetic {} {}", "which will list #all the available properties and methods available #dir(app) #Additional useful", "STKObjects.eLocationTrueAnomaly keplarian.Location.QueryInterface(STKObjects.IAgClassicalLocationTrueAnomaly).Value = trueAnomaly + (45/2)*(i%2) #Stagger TrueAnomalies for every other orbital plane", "is available via python built in dir method, which will list #all the", "startDatetimes[1:] - stopDatetimes[:-1] maxOutage = np.amax(outages).total_seconds() start = stopTimes[np.argmax(outages)] stop = startTimes[np.argmax(outages)+1] outageFile.write(\"{},{},{},{}\\n\".format(facName,maxOutage,start,stop))", "startTime in startTimes]) stopDatetimes = np.array([dt.datetime.strptime(stopTime[:-3], \"%d %b %Y %H:%M:%S.%f\") for stopTime in", "import numpy as np import os #Need to cleanup this file before running", "maxOutage = [] #Save out the report to a text file with open(\"{}CompleteChainAccess.txt\".format(facName),\"w\")", "planes 30 degrees apart 11 in each plane \"\"\" import datetime as dt", "is a pointer to IAgUiApplication #type info is available with python builtin type", "#insert satellite newSat = scenario.Children.New(STKObjects.eSatellite, \"Sat{}{}\".format(i,j)) newSat2 = newSat.QueryInterface(STKObjects.IAgSatellite) #change some basic display", "the SensorConstellation sensorConst2.Objects.Add(\"Satellite/Sat{0}{1}/Sensor/Sensor{0}{1}\".format(i,j)) #Adjust the translucenty of the 
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 09:33:16 2020

@author: jvergere

Ideas: Something similar to the Iridium Constellation:
66 Sats
781 km (7159 semimajor axis)
86.4 inclination
6 Orbit planes 30 degrees apart
11 in each plane
"""

import datetime as dt
import numpy as np
import os

# Need to clean up this file before running each time,
# or refactor the code to avoid writing to the file in append mode
if os.path.exists("MaxOutageData.txt"):
    os.remove("MaxOutageData.txt")

from comtypes.client import CreateObject       # Will allow you to launch STK
# from comtypes.client import GetActiveObject  # Will allow you to connect to a running instance of STK

# Start the application; it will return a pointer to the Application Interface
app = CreateObject("STK12.Application")
# app = GetActiveObject("STK12.Application")
# app is a pointer to IAgUiApplication
# Type info is available with the python builtin type method: type(app)
# More info is available via the python builtin dir method, which will list
# all the available properties and methods: dir(app)
# Additional useful information is available via the python builtin help: help(app)
app.Visible = True
app.UserControl = True

root = app.Personality2  # root -> IAgStkObjectRoot

# These are not available to import until this point if this is the first time
# running STK via COM with python... it won't hurt to leave them here, but after
# running once they can be included at the top with all the other import statements
from comtypes.gen import STKUtil
from comtypes.gen import STKObjects

root.NewScenario("NewTestScenario")
scenario = root.CurrentScenario                              # scenario -> IAgStkObject
scenario2 = scenario.QueryInterface(STKObjects.IAgScenario)  # scenario2 -> IAgScenario
scenario2.StartTime = "1 Jun 2016 16:00:00.000"
scenario2.StopTime = "2 Jun 2016 16:00:00.000"
root.Rewind()

# Insert facilities from a text file using Connect. Each line of the text file is
# formatted:
# FacName,Longitude,Latitude
with open("Facilities.txt", "r") as faclist:
    for line in faclist:
        facData = line.strip().split(",")
        insertNewFacCmd = "New / */Facility {}".format(facData[0])
        root.ExecuteCommand(insertNewFacCmd)
        setPositionCmd = "SetPosition */Facility/{} Geodetic {} {} Terrain".format(facData[0], facData[2], facData[1])
        root.ExecuteCommand(setPositionCmd)
        setColorCommand = "Graphics */Facility/{} SetColor blue".format(facData[0])
        root.ExecuteCommand(setColorCommand)

# Create sensor constellation, used later to hold all the sensor objects
sensorConst = scenario.Children.New(STKObjects.eConstellation, "SensorConst")
sensorConst2 = sensorConst.QueryInterface(STKObjects.IAgConstellation)

# Build satellite constellation, attach sensors, assign each sensor to the constellation object
i = 1
for RAAN in range(0, 180, 45):  # 4 orbit planes
    j = 1
    for trueAnomaly in range(0, 360, 45):  # 8 sats per plane
        # Insert satellite
        newSat = scenario.Children.New(STKObjects.eSatellite, "Sat{}{}".format(i, j))
        newSat2 = newSat.QueryInterface(STKObjects.IAgSatellite)

        # Change some basic display attributes
        newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Color = 65535
        newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Line.Width = STKObjects.e1
        newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Inherit = False
        newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesOrbit).IsGroundTrackVisible = False

        # Build up the initial state using the TwoBody propagator and classical orbital elements
        keplarian = newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.ConvertTo(STKUtil.eOrbitStateClassical).QueryInterface(STKObjects.IAgOrbitStateClassical)
        keplarian.SizeShapeType = STKObjects.eSizeShapeSemimajorAxis
        keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).SemiMajorAxis = 7159
        keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).Eccentricity = 0
        keplarian.Orientation.Inclination = 86.4
        keplarian.Orientation.ArgOfPerigee = 0
        keplarian.Orientation.AscNodeType = STKObjects.eAscNodeRAAN
        keplarian.Orientation.AscNode.QueryInterface(STKObjects.IAgOrientationAscNodeRAAN).Value = RAAN
        keplarian.LocationType = STKObjects.eLocationTrueAnomaly
        # Stagger true anomalies for every other orbital plane
        keplarian.Location.QueryInterface(STKObjects.IAgClassicalLocationTrueAnomaly).Value = trueAnomaly + (45/2)*(i % 2)
        newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.Assign(keplarian)
        newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).Propagate()

        # Attach a sensor to each satellite
        sensor = newSat.Children.New(STKObjects.eSensor, "Sensor{}{}".format(i, j))
        sensor2 = sensor.QueryInterface(STKObjects.IAgSensor)
        sensor2.CommonTasks.SetPatternSimpleConic(62.5, 2)

        # Add the sensor to the SensorConstellation
        sensorConst2.Objects.Add("Satellite/Sat{0}{1}/Sensor/Sensor{0}{1}".format(i, j))

        # Adjust the translucency of the sensor projections
        sensor2.VO.PercentTranslucency = 75
        sensor2.Graphics.LineStyle = STKUtil.eDotted
        j += 1
    i += 1

# Create a Chain object for each facility to the constellation
facCount = scenario.Children.GetElements(STKObjects.eFacility).Count
for i in range(facCount):
    # Create chain
    facName = scenario.Children.GetElements(STKObjects.eFacility).Item(i).InstanceName
    chain = scenario.Children.New(STKObjects.eChain, "{}ToSensorConst".format(facName))
    chain2 = chain.QueryInterface(STKObjects.IAgChain)

    # Modify some display properties
    chain2.Graphics.Animation.Color = 65280
    chain2.Graphics.Animation.LineWidth = STKObjects.e1
    chain2.Graphics.Animation.IsHighlightVisible = False

    # Add objects to the chain
    chain2.Objects.Add("Facility/{}".format(facName))
    chain2.Objects.Add("Constellation/SensorConst")

    # Get complete chain access data
    compAcc = chain.DataProviders.Item("Complete Access").QueryInterface(STKObjects.IAgDataPrvInterval).Exec(scenario2.StartTime, scenario2.StopTime)
    el = compAcc.DataSets.ElementNames
    numRows = compAcc.DataSets.RowCount

    # Save out the report to a text file
    with open("{}CompleteChainAccess.txt".format(facName), "w") as dataFile:
        dataFile.write("{},{},{},{}\n".format(el[0], el[1], el[2], el[3]))
        for row in range(numRows):
            rowData = compAcc.DataSets.GetRow(row)
            dataFile.write("{},{},{},{}\n".format(rowData[0], rowData[1], rowData[2], rowData[3]))

    # Get max outage time for each chain, print to console and save to file
    with open("MaxOutageData.txt", "a") as outageFile:
        if numRows == 1:
            outageFile.write("{},NA,NA,NA\n".format(facName))
            print("{}: No Outage".format(facName))
        else:
            # Get start times and stop times as lists
            startTimes = list(compAcc.DataSets.GetDataSetByName("Start Time").GetValues())
            stopTimes = list(compAcc.DataSets.GetDataSetByName("Stop Time").GetValues())
            # Convert from strings to datetimes
            startDatetimes = np.array([dt.datetime.strptime(startTime[:-3], "%d %b %Y %H:%M:%S.%f") for startTime in startTimes])
            stopDatetimes = np.array([dt.datetime.strptime(stopTime[:-3], "%d %b %Y %H:%M:%S.%f") for stopTime in stopTimes])
            outages = startDatetimes[1:] - stopDatetimes[:-1]
            maxOutage = np.amax(outages).total_seconds()
            start = stopTimes[np.argmax(outages)]
            stop = startTimes[np.argmax(outages) + 1]
            outageFile.write("{},{},{},{}\n".format(facName, maxOutage, start, stop))
            print("{}: {} seconds from {} until {}".format(facName, maxOutage, start, stop))
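The facility-import loop above expects a plain comma-separated text file. The snippet below is a minimal standalone sketch (the facility names and coordinates are made up, not taken from the script) that writes such a Facilities.txt and prints the Connect commands the loop would generate, which is a convenient way to sanity-check the latitude/longitude ordering before driving STK.

# Standalone sketch: build a sample Facilities.txt and preview the Connect
# commands the import loop above would send. Names and coordinates are
# illustrative only.
sampleRows = [
    ("Boulder", "-105.27", "40.02"),    # FacName,Longitude,Latitude
    ("Canberra", "149.13", "-35.28"),
]

with open("Facilities.txt", "w") as f:
    for name, lon, lat in sampleRows:
        f.write("{},{},{}\n".format(name, lon, lat))

with open("Facilities.txt", "r") as faclist:
    for line in faclist:
        facData = line.strip().split(",")
        print("New / */Facility {}".format(facData[0]))
        # SetPosition takes latitude first, hence facData[2] before facData[1]
        print("SetPosition */Facility/{} Geodetic {} {} Terrain".format(facData[0], facData[2], facData[1]))
        print("Graphics */Facility/{} SetColor blue".format(facData[0]))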
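The max-outage logic at the end of the chain loop can also be exercised without STK. The sketch below uses made-up access intervals (the time strings are not STK output) and mirrors the same datetime/numpy computation: gaps are measured from the end of one access interval to the start of the next, and the largest gap is reported.

import datetime as dt
import numpy as np

# Hypothetical access intervals in the same "%d %b %Y %H:%M:%S.%f" format
# the script parses after trimming the report's extra fractional digits.
startTimes = ["1 Jun 2016 16:05:00.000", "1 Jun 2016 17:40:00.000", "1 Jun 2016 19:20:00.000"]
stopTimes  = ["1 Jun 2016 16:55:00.000", "1 Jun 2016 18:10:00.000", "1 Jun 2016 20:00:00.000"]

fmt = "%d %b %Y %H:%M:%S.%f"
startDatetimes = np.array([dt.datetime.strptime(s, fmt) for s in startTimes])
stopDatetimes = np.array([dt.datetime.strptime(s, fmt) for s in stopTimes])

# Outages are the gaps between the end of one access and the start of the next
outages = startDatetimes[1:] - stopDatetimes[:-1]
maxOutage = np.amax(outages).total_seconds()
start = stopTimes[np.argmax(outages)]        # outage begins when the previous access ends
stop = startTimes[np.argmax(outages) + 1]    # and ends when the next access starts

print("Max outage: {} seconds from {} until {}".format(maxOutage, start, stop))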
[ "line) in enumerate(lines): if i == 0: continue guid = \"train-%d\" % (i)", "Unless required by applicable law or agreed to in writing, software # distributed", "guid = \"%s-%s\" % (set_type, i) text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) if", "set_type == \"test\": label = \"0\" else: label = tokenization.convert_to_unicode(line[0]) examples.append( InputExample(guid=guid, text_a=text_a,", "problematic line print(line) continue text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) label = tokenization.convert_to_unicode(line[5])", "is a problematic line print(line) continue text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) label", "text_b = tokenization.convert_to_unicode(line[2]) label = \"0\" else: if len(line) != 6: # there", "Imports <<<<<<< from retrograph.models import tokenization #################################################### # CODE #################################################### class InputExample(object): \"\"\"A", "% (set_type, i) text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) if set_type == \"test\":", "InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\" lines", "as f: for line in f: if skipFirstLine: skipFirstLine = False else: text", "\"test\") def get_labels(self): \"\"\"See base class.\"\"\" return ['ABBR', 'DESC', 'ENTY', 'HUM', 'LOC', 'NUM']", "SICK data set (SentEval version).\"\"\" def loadFile(self, fpath): skipFirstLine = True sick_data =", "\"test\": label = \"0\" else: label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))", "limitations under the License. #################################################### #################################################### # IMPORT STATEMENTS #################################################### # >>>>>> Native", "if set_type == \"test\": text_a = tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) label = \"0\"", "\"\"\"See base class.\"\"\" return ['ABBR', 'DESC', 'ENTY', 'HUM', 'LOC', 'NUM'] def _create_examples(self, dicts,", "in enumerate(lines): # Only the test set has a header if set_type ==", "examples class STSBProcessor(DataProcessor): \"\"\"Processor for the STS-B data set.\"\"\" def get_train_examples(self, data_dir): \"\"\"See", "\"dev\") def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_test_annotated.txt')), \"test\") def get_labels(self):", "= line.strip().split('\\t') sick_data['X_A'].append(text[1].split()) sick_data['X_B'].append(text[2].split()) sick_data['y'].append(text[4]) return sick_data def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\"", "InputExample. Args: guid: Unique id for the example. text_a: string. 
The untokenized text", "\"test\": text_a = tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) label = \"0\" else: if len(line)", "tokenization.convert_to_unicode(line[0])) if set_type == \"test\": text_a = tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) label =", "tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) if set_type == \"test\": label = \"contradiction\" else: label", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a collection of `InputExample`s for the dev set.\"\"\" raise NotImplementedError() def get_test_examples(self, data_dir):", "tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples '''Added by", "\"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) if set_type == 'test': text_a = tokenization.convert_to_unicode(line[-2]) text_b =", "loadFile(self, fpath): trec_data = {'X': [], 'y': []} with os.open(fpath, 'r', encoding='latin-1') as", "# >>>>>> Native Imports <<<<<<< import os # >>>>>> Package Imports <<<<<<< import", "= tokenization.convert_to_unicode(line[8]) text_b = tokenization.convert_to_unicode(line[9]) if set_type == \"test\": label = \"contradiction\" else:", "class.\"\"\" return ['CONTRADICTION', 'NEUTRAL', 'ENTAILMENT'] def _create_examples(self, dicts, set_type): \"\"\"Creates examples for the", "class.\"\"\" return [\"contradiction\", \"entailment\", \"neutral\"] class MnliProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data set", "self.loadFile(os.path.join(data_dir, 'train_5500.label')) split_index = len(data)*0.7 return self._create_examples(data[split_index:], \"dev\") def get_test_examples(self, data_dir): \"\"\"See base", "the test set has a header if set_type == \"test\" and i ==", "be specified for sequence pair tasks. label: (Optional) string. 
The label of the", "tokenization.convert_to_unicode(line[-2]) label = float(line[-1]) examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class RTEProcessor(DataProcessor): \"\"\"Processor", "a collection of `InputExample`s for prediction.\"\"\" raise NotImplementedError() def get_labels(self): \"\"\"Gets the list", "base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\") def get_dev_examples(self, data_dir, matched=True): \"\"\"See base", "label = tokenization.convert_to_unicode(\"contradiction\") examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_dev_examples(self, data_dir):", "[] for (i, dict) in enumerate(dicts): guid = \"%s-%s\" % (set_type, str(i)) text_a", "\"test\") def get_labels(self): \"\"\"See base class.\"\"\" return [\"0\", \"1\"] def _create_examples(self, lines, set_type):", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "self.text_a = text_a self.text_b = text_b self.label = label class DataProcessor(object): \"\"\"Base class", "text_b=None, label=label)) return examples class QQPProcessor(DataProcessor): \"\"\"Processor for the QQP data set (GLUE", "example for simple sequence classification.\"\"\" def __init__(self, guid, text_a, text_b=None, label=None): \"\"\"Constructs a", "if matched: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test\") else: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_mismatched.tsv\")), \"test\")", "os.open(fpath, 'r', encoding='latin-1') as f: for line in f: target, sample = line.strip().split(':',", "set (GLUE version).\"\"\" def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"diagnostic.tsv\")),", "not for test examples. \"\"\" self.guid = guid self.text_a = text_a self.text_b =", "matched=True): \"\"\"See base class.\"\"\" if matched: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")), \"dev_matched\") else: return", "<<<<<<< import tensorflow as tf import csv # >>>>>> Local Imports <<<<<<< from", "len(line) != 6: # there is a problematic line print(line) continue text_a =", "== \"test\": label = \"entailment\" else: label = tokenization.convert_to_unicode(line[3]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b,", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "skipFirstLine = True sick_data = {'X_A': [], 'X_B': [], 'y': []} with os.open(fpath,", "WNLIProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See", "language governing permissions and # limitations under the License. 
#################################################### #################################################### # IMPORT", "and dev sets.\"\"\" examples = [] for (i, line) in enumerate(lines): # Only", "self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\") def get_dev_examples(self, data_dir, matched=True): \"\"\"See base class.\"\"\" if matched:", "tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) if set_type == \"test\": label = \"0\" else: label", "text_b = tokenization.convert_to_unicode(line[-1]) label = 0.0 else: text_a = tokenization.convert_to_unicode(line[-3]) text_b = tokenization.convert_to_unicode(line[-2])", "% (i) language = tokenization.convert_to_unicode(line[0]) if language != tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6])", "as tf import csv # >>>>>> Local Imports <<<<<<< from retrograph.models import tokenization", "of the example. This should be specified for train and dev examples, but", "text_a=text_a, text_b=None, label=label)) return examples class QQPProcessor(DataProcessor): \"\"\"Processor for the QQP data set", "label=label)) return examples def get_labels(self): \"\"\"See base class.\"\"\" return [\"contradiction\", \"entailment\", \"neutral\"] class", "data converters for sequence classification data sets.\"\"\" def get_train_examples(self, data_dir): \"\"\"Gets a collection", "\"dev_matched\") else: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_mismatched.tsv\")), \"dev_mismatched\") def get_test_examples(self, data_dir, matched=True): \"\"\"See base", "= self._read_tsv( os.path.join(data_dir, \"multinli\", \"multinli.train.%s.tsv\" % self.language)) examples = [] for (i, line)", "return ['CONTRADICTION', 'NEUTRAL', 'ENTAILMENT'] def _create_examples(self, dicts, set_type): \"\"\"Creates examples for the training", "else: if len(line) != 6: # there is a problematic line print(line) continue", "self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class", "\"neutral\"] def _create_examples(self, lines, set_type): \"\"\"Creates examples for the training and dev sets.\"\"\"", "else: label = tokenization.convert_to_unicode(line[3]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class STSBProcessor(DataProcessor):", "\"test\": label = \"contradiction\" else: label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))", "sequence. For single sequence tasks, only this sequence must be specified. 
text_b: (Optional)", "\"\"\"Gets the list of labels for this data set.\"\"\" raise NotImplementedError() @classmethod def", "label=label)) return examples class STSBProcessor(DataProcessor): \"\"\"Processor for the STS-B data set.\"\"\" def get_train_examples(self,", "= tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) if set_type == \"test\": label = \"contradiction\" else:", "InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): \"\"\"See base class.\"\"\" return [\"contradiction\",", "= tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples '''Added", "def _create_examples(self, lines, set_type): \"\"\"Creates examples for the training and dev sets.\"\"\" examples", "for the SICK data set (SentEval version).\"\"\" def loadFile(self, fpath): skipFirstLine = True", "label=label)) return examples def get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\" lines = self._read_tsv(os.path.join(data_dir, \"xnli.dev.tsv\"))", "self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\") def get_labels(self): \"\"\"See base class.\"\"\" return [\"0\", \"1\"] def _create_examples(self,", "tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[8]) text_b = tokenization.convert_to_unicode(line[9]) if set_type == \"test\": label =", "(set_type, str(i)) text_a = tokenization.convert_to_unicode(dict['X']) label = tokenization.convert_to_unicode(dict['y']) examples.append( InputExample(guid=guid, text_a=text_a, label=label)) return", "tf.gfile.Open(input_file, \"r\") as f: reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar) lines = [] for", "\"0\" else: if len(line) != 6: # there is a problematic line print(line)", "in f: target, sample = line.strip().split(':', 1) sample = sample.split(' ', 1)[1].split() trec_data['X'].append(sample)", "not use this file except in compliance with the License. 
# You may", "in enumerate(dicts): guid = \"%s-%s\" % (set_type, str(i)) text_a = tokenization.convert_to_unicode(dict['X_A']) text_b =", "MultiNLI data set (GLUE version).\"\"\" def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(", "text_b=text_b, label=label)) return examples class SICKEntailmentProcessor(DataProcessor): \"\"\"Processor for the SICK data set (SentEval", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "= tokenization.convert_to_unicode(line[0]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class ColaProcessor(DataProcessor): \"\"\"Processor for", "CoLA data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(", "else: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_mismatched.tsv\")), \"dev_mismatched\") def get_test_examples(self, data_dir, matched=True): \"\"\"See base class.\"\"\"", "line.strip().split('\\t') sick_data['X_A'].append(text[1].split()) sick_data['X_B'].append(text[2].split()) sick_data['y'].append(text[4]) return sick_data def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" return", "'HUM', 'LOC', 'NUM'] def _create_examples(self, dicts, set_type): \"\"\"Creates examples for the training and", "continue text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) label = tokenization.convert_to_unicode(line[5]) examples.append( InputExample(guid=guid, text_a=text_a,", "base class.\"\"\" return [\"entailment\", \"not_entailment\"] def _create_examples(self, lines, set_type): \"\"\"Creates examples for the", "line in f: if skipFirstLine: skipFirstLine = False else: text = line.strip().split('\\t') sick_data['X_A'].append(text[1].split())", "classification.\"\"\" def __init__(self, guid, text_a, text_b=None, label=None): \"\"\"Constructs a InputExample. Args: guid: Unique", "Google AI Language Team Authors. # Copyright 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>", "agreed to in writing, software # distributed under the License is distributed on", "class.\"\"\" return [\"contradiction\", \"entailment\", \"neutral\"] def _create_examples(self, lines, set_type): \"\"\"Creates examples for the", "CODE #################################################### class InputExample(object): \"\"\"A single training/test example for simple sequence classification.\"\"\" def", "self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\") def get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev.tsv\")),", "the SICK data set (SentEval version).\"\"\" def loadFile(self, fpath): skipFirstLine = True sick_data", "def get_dev_examples(self, data_dir, matched=True): \"\"\"See base class.\"\"\" if matched: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")),", "= [] for line in reader: lines.append(line) return lines class XnliProcessor(DataProcessor): \"\"\"Processor for", "and dev examples, but not for test examples. 
\"\"\" self.guid = guid self.text_a", "= tokenization.convert_to_unicode(line[-1]) label = 0.0 else: text_a = tokenization.convert_to_unicode(line[-3]) text_b = tokenization.convert_to_unicode(line[-2]) label", "sick_data['y'].append(text[4]) return sick_data def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_train.txt')), \"train\")", "for line in f: target, sample = line.strip().split(':', 1) sample = sample.split(' ',", "0.0 else: text_a = tokenization.convert_to_unicode(line[-3]) text_b = tokenization.convert_to_unicode(line[-2]) label = float(line[-1]) examples.append(InputExample(guid=guid, text_a=text_a,", "data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_train.txt')), \"train\") def get_dev_examples(self, data_dir): \"\"\"See base", "untokenized text of the second sequence. Only must be specified for sequence pair", "return examples class DiagnosticProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\" def", "retrograph.models import tokenization #################################################### # CODE #################################################### class InputExample(object): \"\"\"A single training/test example", "<NAME>, <NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\");", "tokenization.convert_to_unicode(dict['X_B']) label = tokenization.convert_to_unicode(dict['y']) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class TRECProcessor(DataProcessor):", "return examples class RTEProcessor(DataProcessor): \"\"\"Processor for the RTE data set (GLUE version).\"\"\" def", "value file.\"\"\" with tf.gfile.Open(input_file, \"r\") as f: reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar) lines", "training/test example for simple sequence classification.\"\"\" def __init__(self, guid, text_a, text_b=None, label=None): \"\"\"Constructs", "text_a=text_a, text_b=text_b, label=label)) return examples class WNLIProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data set", "class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'TREC_10.label')), \"test\") def get_labels(self): \"\"\"See base class.\"\"\" return ['ABBR', 'DESC',", "== \"test\": text_a = tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) label = \"0\" else: if", "text_a = tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples", "label=label)) return examples class TRECProcessor(DataProcessor): \"\"\"Processor for the TREC data set (SentEval version).\"\"\"", "Team Authors. # Copyright 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # Licensed", "to in writing, software # distributed under the License is distributed on an", "specified for sequence pair tasks. label: (Optional) string. The label of the example.", "implied. 
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

####################################################
####################################################
# IMPORT STATEMENTS
####################################################

# >>>>>> Native Imports <<<<<<<
import os

# >>>>>> Package Imports <<<<<<<
import tensorflow as tf
import csv

# >>>>>> Local Imports <<<<<<<
from retrograph.models import tokenization

####################################################
# CODE
####################################################
\"not_entailment\"] def _create_examples(self, lines, set_type): \"\"\"Creates examples for the training and dev", "tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MrpcProcessor(DataProcessor): \"\"\"Processor for the", "examples class QQPProcessor(DataProcessor): \"\"\"Processor for the QQP data set (GLUE version).\"\"\" def get_train_examples(self,", "\"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_train.txt')), \"train\") def get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\"", "the dev set.\"\"\" raise NotImplementedError() def get_test_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s", "label = \"0\" else: text_a = tokenization.convert_to_unicode(line[0]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a,", "self._create_examples(self.loadFile(os.path.join(data_dir, 'TREC_10.label')), \"test\") def get_labels(self): \"\"\"See base class.\"\"\" return ['ABBR', 'DESC', 'ENTY', 'HUM',", "if i == 0: continue guid = \"%s-%s\" % (set_type, i) text_a =", "trec_data def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" data = self.loadFile(os.path.join(data_dir, 'train_5500.label')) split_index =", "self.language)) examples = [] for (i, line) in enumerate(lines): if i == 0:", "sets.\"\"\" examples = [] for (i, line) in enumerate(lines): if i == 0:", "= tokenization.convert_to_unicode(dict['y']) examples.append( InputExample(guid=guid, text_a=text_a, label=label)) return examples #################################################### # MAIN #################################################### #", "Imports <<<<<<< import tensorflow as tf import csv # >>>>>> Local Imports <<<<<<<", "in f: if skipFirstLine: skipFirstLine = False else: text = line.strip().split('\\t') sick_data['X_A'].append(text[1].split()) sick_data['X_B'].append(text[2].split())", "\"neutral\"] class MnliProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\" def get_train_examples(self,", "__init__(self, guid, text_a, text_b=None, label=None): \"\"\"Constructs a InputExample. Args: guid: Unique id for", "string. The untokenized text of the second sequence. Only must be specified for", "collection of `InputExample`s for prediction.\"\"\" raise NotImplementedError() def get_labels(self): \"\"\"Gets the list of", "for sequence pair tasks. label: (Optional) string. The label of the example. 
This", "tokenization.convert_to_unicode(\"contradictory\"): label = tokenization.convert_to_unicode(\"contradiction\") examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_dev_examples(self,", "get_train_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\" raise NotImplementedError()", "for (i, line) in enumerate(lines): if i == 0: continue guid = \"train-%d\"", "\"test\": text_a = tokenization.convert_to_unicode(line[1]) label = \"0\" else: text_a = tokenization.convert_to_unicode(line[0]) label =", "XnliProcessor(DataProcessor): \"\"\"Processor for the XNLI data set.\"\"\" def __init__(self): self.language = \"zh\" def", "f: reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar) lines = [] for line in reader:", "str(i)) text_a = tokenization.convert_to_unicode(dict['X']) label = tokenization.convert_to_unicode(dict['y']) examples.append( InputExample(guid=guid, text_a=text_a, label=label)) return examples", "base class.\"\"\" return [\"0\", \"1\"] def _create_examples(self, lines, set_type): \"\"\"Creates examples for the", "\"\"\"A single training/test example for simple sequence classification.\"\"\" def __init__(self, guid, text_a, text_b=None,", "matched=True): \"\"\"See base class.\"\"\" if matched: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test\") else: return", "list of labels for this data set.\"\"\" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file,", "\"\"\"See base class.\"\"\" data = self.loadFile(os.path.join(data_dir, 'train_5500.label')) split_index = len(data)*0.7 return self._create_examples(data[:split_index], \"train\")", "set (SentEval version).\"\"\" def loadFile(self, fpath): trec_data = {'X': [], 'y': []} with", "dev examples, but not for test examples. \"\"\" self.guid = guid self.text_a =", "test examples. 
\"\"\" self.guid = guid self.text_a = text_a self.text_b = text_b self.label", "\"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\") def get_dev_examples(self, data_dir, matched=True): \"\"\"See", "in enumerate(dicts): guid = \"%s-%s\" % (set_type, str(i)) text_a = tokenization.convert_to_unicode(dict['X']) label =", "6: # there is a problematic line print(line) continue text_a = tokenization.convert_to_unicode(line[3]) text_b", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "'y': []} with os.open(fpath, 'r', encoding='utf-8') as f: for line in f: if", "(SentEval version).\"\"\" def loadFile(self, fpath): skipFirstLine = True sick_data = {'X_A': [], 'X_B':", "text_b = tokenization.convert_to_unicode(line[2]) if set_type == \"test\": label = \"entailment\" else: label =", "for (i, dict) in enumerate(dicts): guid = \"%s-%s\" % (set_type, str(i)) text_a =", "with os.open(fpath, 'r', encoding='latin-1') as f: for line in f: target, sample =", "for the dev set.\"\"\" raise NotImplementedError() def get_test_examples(self, data_dir): \"\"\"Gets a collection of", "\"\"\"See base class.\"\"\" return [\"entailment\", \"not_entailment\"] def _create_examples(self, lines, set_type): \"\"\"Creates examples for", "'SICK_train.txt')), \"train\") def get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_trial.txt')), \"dev\") def", "return examples class SICKEntailmentProcessor(DataProcessor): \"\"\"Processor for the SICK data set (SentEval version).\"\"\" def", "guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) if set_type == \"test\": text_a = tokenization.convert_to_unicode(line[1])", "def _read_tsv(cls, input_file, quotechar=None): \"\"\"Reads a tab separated value file.\"\"\" with tf.gfile.Open(input_file, \"r\")", "the example. This should be specified for train and dev examples, but not", "text_b=None, label=label)) return examples '''Added by Anne''' class SST2Processor(DataProcessor): \"\"\"Processor for the CoLA", "See the License for the specific language governing permissions and # limitations under", "specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must", "tasks. label: (Optional) string. The label of the example. 
class XnliProcessor(DataProcessor):
  """Processor for the XNLI data set."""

  def __init__(self):
    self.language = "zh"

  def get_train_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(
        os.path.join(data_dir, "multinli",
                     "multinli.train.%s.tsv" % self.language))
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "train-%d" % (i)
      text_a = tokenization.convert_to_unicode(line[0])
      text_b = tokenization.convert_to_unicode(line[1])
      label = tokenization.convert_to_unicode(line[2])
      if label == tokenization.convert_to_unicode("contradictory"):
        label = tokenization.convert_to_unicode("contradiction")
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples

  def get_dev_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "dev-%d" % (i)
      language = tokenization.convert_to_unicode(line[0])
      if language != tokenization.convert_to_unicode(self.language):
        continue
      text_a = tokenization.convert_to_unicode(line[6])
      text_b = tokenization.convert_to_unicode(line[7])
      label = tokenization.convert_to_unicode(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples

  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]


class MnliProcessor(DataProcessor):
  """Processor for the MultiNLI data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir, matched=True):
    """See base class."""
    if matched:
      return self._create_examples(
          self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
          "dev_matched")
    else:
      return self._create_examples(
          self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
          "dev_mismatched")

  def get_test_examples(self, data_dir, matched=True):
    """See base class."""
    if matched:
      return self._create_examples(
          self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
    else:
      return self._create_examples(
          self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")),
          "test")

  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
      text_a = tokenization.convert_to_unicode(line[8])
      text_b = tokenization.convert_to_unicode(line[9])
      if set_type == "test":
        label = "contradiction"
      else:
        label = tokenization.convert_to_unicode(line[-1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples


class WNLIProcessor(DataProcessor):
  """Processor for the WNLI data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      text_a = tokenization.convert_to_unicode(line[1])
      text_b = tokenization.convert_to_unicode(line[2])
      if set_type == "test":
        label = "0"
      else:
        label = tokenization.convert_to_unicode(line[-1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples


class DiagnosticProcessor(DataProcessor):
  """Processor for the MultiNLI diagnostic data set (GLUE version)."""

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "diagnostic.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the test set."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      text_a = tokenization.convert_to_unicode(line[1])
      text_b = tokenization.convert_to_unicode(line[2])
      if set_type == "test":
        label = "contradiction"
      else:
        label = tokenization.convert_to_unicode(line[-1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
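
# A small usage sketch (illustrative, not part of the processor API): MNLI
# exposes matched and mismatched dev splits through the `matched` flag, so
# an evaluation loop can cover both. `data_dir` is an assumed placeholder.
def mnli_dev_examples_both(data_dir):
  """Returns (matched, mismatched) dev examples for MNLI."""
  processor = MnliProcessor()
  return (processor.get_dev_examples(data_dir, matched=True),
          processor.get_dev_examples(data_dir, matched=False))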
class MrpcProcessor(DataProcessor):
  """Processor for the MRPC data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      text_a = tokenization.convert_to_unicode(line[3])
      text_b = tokenization.convert_to_unicode(line[4])
      if set_type == "test":
        label = "0"
      else:
        label = tokenization.convert_to_unicode(line[0])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples


class ColaProcessor(DataProcessor):
  """Processor for the CoLA data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      # Only the test set has a header
      if set_type == "test" and i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      if set_type == "test":
        text_a = tokenization.convert_to_unicode(line[1])
        label = "0"
      else:
        text_a = tokenization.convert_to_unicode(line[3])
        label = tokenization.convert_to_unicode(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples


'''Added by Anne'''
class SST2Processor(DataProcessor):
  """Processor for the SST-2 data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      if set_type == "test":
        text_a = tokenization.convert_to_unicode(line[1])
        label = "0"
      else:
        text_a = tokenization.convert_to_unicode(line[0])
        label = tokenization.convert_to_unicode(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples


class QQPProcessor(DataProcessor):
  """Processor for the QQP data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
      if set_type == "test":
        text_a = tokenization.convert_to_unicode(line[1])
        text_b = tokenization.convert_to_unicode(line[2])
        label = "0"
      else:
        if len(line) != 6:
          # there is a problematic line
          print(line)
          continue
        text_a = tokenization.convert_to_unicode(line[3])
        text_b = tokenization.convert_to_unicode(line[4])
        label = tokenization.convert_to_unicode(line[5])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
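
# Single-sentence tasks (CoLA, SST-2) leave text_b as None, so any code
# consuming these InputExamples has to branch on it. A minimal sketch,
# assuming a BERT-style `tokenizer` object with a `tokenize(text)` method;
# the helper name is illustrative:
def example_token_count(example, tokenizer):
  """Counts wordpiece tokens in an example, for single or pair inputs."""
  count = len(tokenizer.tokenize(example.text_a))
  if example.text_b:
    count += len(tokenizer.tokenize(example.text_b))
  return count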
tokenization.convert_to_unicode(\"contradiction\") examples.append(", "\"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See base", "self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\") def _create_examples(self, lines, set_type): \"\"\"Creates examples for the training and", "= tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class DiagnosticProcessor(DataProcessor): \"\"\"Processor for", "= tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples '''Added by Anne''' class", "RTEProcessor(DataProcessor): \"\"\"Processor for the RTE data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See", "get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_trial.txt')), \"dev\") def get_test_examples(self, data_dir): \"\"\"See", "\"multinli\", \"multinli.train.%s.tsv\" % self.language)) examples = [] for (i, line) in enumerate(lines): if", "but not for test examples. \"\"\" self.guid = guid self.text_a = text_a self.text_b", "if i == 0: continue guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) text_a =", "class.\"\"\" if matched: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test\") else: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_mismatched.tsv\")),", "the list of labels for this data set.\"\"\" raise NotImplementedError() @classmethod def _read_tsv(cls,", "guid = \"%s-%s\" % (set_type, str(i)) text_a = tokenization.convert_to_unicode(dict['X_A']) text_b = tokenization.convert_to_unicode(dict['X_B']) label", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "examples. 
\"\"\" self.guid = guid self.text_a = text_a self.text_b = text_b self.label =", "base class.\"\"\" return [\"contradiction\", \"entailment\", \"neutral\"] def _create_examples(self, lines, set_type): \"\"\"Creates examples for", "get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_test_annotated.txt')), \"test\") def get_labels(self): \"\"\"See base", "if language != tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label =", "dict) in enumerate(dicts): guid = \"%s-%s\" % (set_type, str(i)) text_a = tokenization.convert_to_unicode(dict['X_A']) text_b", "= \"0\" else: label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples", "def get_train_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\" raise", "return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_trial.txt')), \"dev\") def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_test_annotated.txt')),", "self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\") def get_dev_examples(self, data_dir, matched=True): \"\"\"See base class.\"\"\" if matched: return", "data_dir): \"\"\"See base class.\"\"\" data = self.loadFile(os.path.join(data_dir, 'train_5500.label')) split_index = len(data)*0.7 return self._create_examples(data[split_index:],", "tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[2]) if label == tokenization.convert_to_unicode(\"contradictory\"): label = tokenization.convert_to_unicode(\"contradiction\") examples.append( InputExample(guid=guid,", "examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MrpcProcessor(DataProcessor): \"\"\"Processor for the MRPC", "matched: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test\") else: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_mismatched.tsv\")), \"test\") def", "\"train\") def get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\") def", "return self._create_examples( self._read_tsv(os.path.join(data_dir, \"diagnostic.tsv\")), \"test\") def get_labels(self): \"\"\"See base class.\"\"\" return [\"contradiction\", \"entailment\",", "NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): \"\"\"Reads a tab separated value file.\"\"\" with", "tokenization.convert_to_unicode(dict['y']) examples.append( InputExample(guid=guid, text_a=text_a, label=label)) return examples #################################################### # MAIN #################################################### # EOF", "text_a = tokenization.convert_to_unicode(dict['X']) label = tokenization.convert_to_unicode(dict['y']) examples.append( InputExample(guid=guid, text_a=text_a, label=label)) return examples ####################################################", "= self._read_tsv(os.path.join(data_dir, \"xnli.dev.tsv\")) examples = [] for (i, line) in enumerate(lines): if i", "guid = \"dev-%d\" % (i) language = tokenization.convert_to_unicode(line[0]) if language != 
tokenization.convert_to_unicode(self.language): continue", "examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class WNLIProcessor(DataProcessor): \"\"\"Processor for the MultiNLI", "get_test_examples(self, data_dir, matched=True): \"\"\"See base class.\"\"\" if matched: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test\")", "text_b = tokenization.convert_to_unicode(line[4]) if set_type == \"test\": label = \"0\" else: label =", "get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\") def get_test_examples(self, data_dir):", "base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'TREC_10.label')), \"test\") def get_labels(self): \"\"\"See base class.\"\"\" return ['ABBR',", "class STSBProcessor(DataProcessor): \"\"\"Processor for the STS-B data set.\"\"\" def get_train_examples(self, data_dir): \"\"\"See base", "tokenization.convert_to_unicode(\"contradiction\") examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_dev_examples(self, data_dir): \"\"\"See base", "text_a, text_b=None, label=None): \"\"\"Constructs a InputExample. Args: guid: Unique id for the example.", "base class.\"\"\" return ['ABBR', 'DESC', 'ENTY', 'HUM', 'LOC', 'NUM'] def _create_examples(self, dicts, set_type):", "the MRPC data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" return", "examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class STSBProcessor(DataProcessor): \"\"\"Processor for the STS-B", "text_b = tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples", "text_b = tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[2]) if label == tokenization.convert_to_unicode(\"contradictory\"): label = tokenization.convert_to_unicode(\"contradiction\")", "examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class TRECProcessor(DataProcessor): \"\"\"Processor for the TREC", "if i == 0: continue guid = \"train-%d\" % (i) text_a = tokenization.convert_to_unicode(line[0])", "classification data sets.\"\"\" def get_train_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for the", "tensorflow as tf import csv # >>>>>> Local Imports <<<<<<< from retrograph.models import", "label = tokenization.convert_to_unicode(line[5]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QNLIProcessor(DataProcessor): \"\"\"Processor", "'train_5500.label')) split_index = len(data)*0.7 return self._create_examples(data[:split_index], \"train\") def get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\"", "base class.\"\"\" return [\"contradiction\", \"entailment\", \"neutral\"] class MnliProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data", "else: label = tokenization.convert_to_unicode(line[3]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class SICKEntailmentProcessor(DataProcessor):", "dict) in enumerate(dicts): guid = \"%s-%s\" % (set_type, str(i)) text_a = tokenization.convert_to_unicode(dict['X']) label", 
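
# STSBProcessor is the one regression task here: its labels are float
# similarity scores (nominally 0-5) rather than strings, so feature
# conversion must branch on label type. A minimal sketch; the helper name
# is an illustrative assumption:
def label_id_or_score(example, label_list):
  """Returns a float for regression examples, else the label's index."""
  if isinstance(example.label, float):
    return example.label
  return label_list.index(example.label)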
"(i) language = tokenization.convert_to_unicode(line[0]) if language != tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6]) text_b", "this sequence must be specified. text_b: (Optional) string. The untokenized text of the", "QNLIProcessor(DataProcessor): \"\"\"Processor for the QQP data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See", "label=label)) return examples class WNLIProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"", "[\"contradiction\", \"entailment\", \"neutral\"] class MnliProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"", "should be specified for train and dev examples, but not for test examples.", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "def get_labels(self): \"\"\"See base class.\"\"\" return [\"contradiction\", \"entailment\", \"neutral\"] def _create_examples(self, lines, set_type):", "i) if set_type == \"test\": text_a = tokenization.convert_to_unicode(line[1]) label = \"0\" else: text_a", "return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_test_annotated.txt')), \"test\") def get_labels(self): \"\"\"See base class.\"\"\" return ['CONTRADICTION', 'NEUTRAL', 'ENTAILMENT']", "{'X': [], 'y': []} with os.open(fpath, 'r', encoding='latin-1') as f: for line in", "tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class QQPProcessor(DataProcessor): \"\"\"Processor for the", "text_b=text_b, label=label)) return examples def get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\" lines = self._read_tsv(os.path.join(data_dir,", "tokenization.convert_to_unicode(line[3]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class STSBProcessor(DataProcessor): \"\"\"Processor for the", "\"\"\"See base class.\"\"\" return ['CONTRADICTION', 'NEUTRAL', 'ENTAILMENT'] def _create_examples(self, dicts, set_type): \"\"\"Creates examples", "for the MultiNLI data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\"", "self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")), \"dev_matched\") else: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_mismatched.tsv\")), \"dev_mismatched\") def get_test_examples(self, data_dir, matched=True):", "text_b=text_b, label=label)) return examples class ColaProcessor(DataProcessor): \"\"\"Processor for the CoLA data set (GLUE", "\"\"\"See base class.\"\"\" lines = self._read_tsv( os.path.join(data_dir, \"multinli\", \"multinli.train.%s.tsv\" % self.language)) examples =", "set_type == \"test\": text_a = tokenization.convert_to_unicode(line[1]) label = \"0\" else: text_a = tokenization.convert_to_unicode(line[3])", "tokenization.convert_to_unicode(line[0]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class QQPProcessor(DataProcessor):", "self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test\") else: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_mismatched.tsv\")), \"test\") def get_labels(self): \"\"\"See", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "for (i, line) in enumerate(lines): if i == 0: continue guid = \"dev-%d\"", 
"(Optional) string. The untokenized text of the second sequence. Only must be specified", "collection of `InputExample`s for the train set.\"\"\" raise NotImplementedError() def get_dev_examples(self, data_dir): \"\"\"Gets", "a problematic line print(line) continue text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) label =", "csv.reader(f, delimiter=\"\\t\", quotechar=quotechar) lines = [] for line in reader: lines.append(line) return lines", "\"%s-%s\" % (set_type, i) if set_type == \"test\": text_a = tokenization.convert_to_unicode(line[1]) label =", "return [\"contradiction\", \"entailment\", \"neutral\"] class MnliProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data set (GLUE", "class.\"\"\" if matched: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")), \"dev_matched\") else: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_mismatched.tsv\")),", "\"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\") def _create_examples(self, lines, set_type): \"\"\"Creates", "sample = sample.split(' ', 1)[1].split() trec_data['X'].append(sample) trec_data['y'].append(target) return trec_data def get_train_examples(self, data_dir): \"\"\"See", "\"not_entailment\"] def _create_examples(self, lines, set_type): \"\"\"Creates examples for the training and dev sets.\"\"\"", "the specific language governing permissions and # limitations under the License. #################################################### ####################################################", "return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\") def get_labels(self): \"\"\"See base class.\"\"\" return [\"entailment\", \"not_entailment\"]", "== \"test\" and i == 0: continue guid = \"%s-%s\" % (set_type, i)", "text_b = tokenization.convert_to_unicode(line[4]) label = tokenization.convert_to_unicode(line[5]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples", "examples for the training and dev sets.\"\"\" examples = [] for (i, dict)", "label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MrpcProcessor(DataProcessor): \"\"\"Processor", "get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\") def get_labels(self): \"\"\"See", "the License for the specific language governing permissions and # limitations under the", "def get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\" data = self.loadFile(os.path.join(data_dir, 'train_5500.label')) split_index = len(data)*0.7", "examples for the training and dev sets.\"\"\" examples = [] for (i, line)", "base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\") def get_labels(self): \"\"\"See base class.\"\"\" return", "NotImplementedError() def get_test_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for prediction.\"\"\" raise NotImplementedError()", "enumerate(lines): if i == 0: continue guid = \"train-%d\" % (i) text_a =", "for the RTE data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\"", "class.\"\"\" return ['ABBR', 'DESC', 'ENTY', 'HUM', 'LOC', 'NUM'] def _create_examples(self, dicts, 
set_type): \"\"\"Creates", "'y': []} with os.open(fpath, 'r', encoding='latin-1') as f: for line in f: target,", "tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) if set_type == \"test\": label =", "text_b=text_b, label=label)) return examples class TRECProcessor(DataProcessor): \"\"\"Processor for the TREC data set (SentEval", "guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) if", "text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) if set_type == \"test\": label = \"0\"", "= tokenization.convert_to_unicode(line[2]) if set_type == \"test\": label = \"0\" else: label = tokenization.convert_to_unicode(line[-1])", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "tokenization.convert_to_unicode(line[0])) if set_type == 'test': text_a = tokenization.convert_to_unicode(line[-2]) text_b = tokenization.convert_to_unicode(line[-1]) label =", "# >>>>>> Local Imports <<<<<<< from retrograph.models import tokenization #################################################### # CODE ####################################################", "def __init__(self): self.language = \"zh\" def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" lines =", "else: label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class DiagnosticProcessor(DataProcessor):", "f: target, sample = line.strip().split(':', 1) sample = sample.split(' ', 1)[1].split() trec_data['X'].append(sample) trec_data['y'].append(target)", "import csv # >>>>>> Local Imports <<<<<<< from retrograph.models import tokenization #################################################### #", "class MnliProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\" def get_train_examples(self, data_dir):", "\"test\") def get_labels(self): \"\"\"See base class.\"\"\" return [\"entailment\", \"not_entailment\"] def _create_examples(self, lines, set_type):", "1) sample = sample.split(' ', 1)[1].split() trec_data['X'].append(sample) trec_data['y'].append(target) return trec_data def get_train_examples(self, data_dir):", "\"dev_mismatched\") def get_test_examples(self, data_dir, matched=True): \"\"\"See base class.\"\"\" if matched: return self._create_examples( self._read_tsv(os.path.join(data_dir,", "file.\"\"\" with tf.gfile.Open(input_file, \"r\") as f: reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar) lines =", "(i, line) in enumerate(lines): if i == 0: continue guid = \"%s-%s\" %", "<NAME>, <NAME>, <NAME> # # Licensed under the Apache License, Version 2.0 (the", "self._read_tsv( os.path.join(data_dir, \"multinli\", \"multinli.train.%s.tsv\" % self.language)) examples = [] for (i, line) in", "fpath): trec_data = {'X': [], 'y': []} with os.open(fpath, 'r', encoding='latin-1') as f:", "== 'test': text_a = tokenization.convert_to_unicode(line[-2]) text_b = tokenization.convert_to_unicode(line[-1]) label = 0.0 else: text_a", "class.\"\"\" data = self.loadFile(os.path.join(data_dir, 'train_5500.label')) split_index = len(data)*0.7 return self._create_examples(data[:split_index], \"train\") def get_dev_examples(self,", "tokenization.convert_to_unicode(dict['X_A']) text_b = tokenization.convert_to_unicode(dict['X_B']) label = 
tokenization.convert_to_unicode(dict['y']) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return", "class.\"\"\" data = self.loadFile(os.path.join(data_dir, 'train_5500.label')) split_index = len(data)*0.7 return self._create_examples(data[split_index:], \"dev\") def get_test_examples(self,", "tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid,", "Version 2.0 (the \"License\"); # you may not use this file except in", "else: text_a = tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return", "tab separated value file.\"\"\" with tf.gfile.Open(input_file, \"r\") as f: reader = csv.reader(f, delimiter=\"\\t\",", "<NAME>, <NAME>, <NAME>, <NAME> # # Licensed under the Apache License, Version 2.0", "self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test\") else: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_mismatched.tsv\")), \"test\") def get_labels(self): \"\"\"See base", "guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[8]) text_b = tokenization.convert_to_unicode(line[9]) if", "as f: for line in f: target, sample = line.strip().split(':', 1) sample =", "the training and dev sets.\"\"\" examples = [] for (i, line) in enumerate(lines):", "language != tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1])", "data_dir, matched=True): \"\"\"See base class.\"\"\" if matched: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test\") else:", "text_b = tokenization.convert_to_unicode(line[-2]) label = float(line[-1]) examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class", "= tokenization.convert_to_unicode(line[4]) label = tokenization.convert_to_unicode(line[5]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class", "[\"contradiction\", \"entailment\", \"neutral\"] def _create_examples(self, lines, set_type): \"\"\"Creates examples for the training and", "= \"contradiction\" else: label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples", "= tokenization.convert_to_unicode(line[0]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class", "text_a = tokenization.convert_to_unicode(line[8]) text_b = tokenization.convert_to_unicode(line[9]) if set_type == \"test\": label = \"contradiction\"", "text_b=text_b, label=label)) return examples class DiagnosticProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data set (GLUE", "enumerate(dicts): guid = \"%s-%s\" % (set_type, str(i)) text_a = tokenization.convert_to_unicode(dict['X']) label = tokenization.convert_to_unicode(dict['y'])", "return sick_data def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_train.txt')), 
\"train\") def", "['ABBR', 'DESC', 'ENTY', 'HUM', 'LOC', 'NUM'] def _create_examples(self, dicts, set_type): \"\"\"Creates examples for", "InputExample(object): \"\"\"A single training/test example for simple sequence classification.\"\"\" def __init__(self, guid, text_a,", "this data set.\"\"\" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): \"\"\"Reads a tab", "return trec_data def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" data = self.loadFile(os.path.join(data_dir, 'train_5500.label')) split_index", "guid self.text_a = text_a self.text_b = text_b self.label = label class DataProcessor(object): \"\"\"Base", "class for data converters for sequence classification data sets.\"\"\" def get_train_examples(self, data_dir): \"\"\"Gets", "'ENTAILMENT'] def _create_examples(self, dicts, set_type): \"\"\"Creates examples for the training and dev sets.\"\"\"", "= tokenization.convert_to_unicode(line[5]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QNLIProcessor(DataProcessor): \"\"\"Processor for", "examples class SICKEntailmentProcessor(DataProcessor): \"\"\"Processor for the SICK data set (SentEval version).\"\"\" def loadFile(self,", "language = tokenization.convert_to_unicode(line[0]) if language != tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6]) text_b =", "def __init__(self, guid, text_a, text_b=None, label=None): \"\"\"Constructs a InputExample. Args: guid: Unique id", "lines = self._read_tsv(os.path.join(data_dir, \"xnli.dev.tsv\")) examples = [] for (i, line) in enumerate(lines): if", "text_a = tokenization.convert_to_unicode(line[1]) label = \"0\" else: text_a = tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1])", "class QNLIProcessor(DataProcessor): \"\"\"Processor for the QQP data set (GLUE version).\"\"\" def get_train_examples(self, data_dir):", "class.\"\"\" lines = self._read_tsv( os.path.join(data_dir, \"multinli\", \"multinli.train.%s.tsv\" % self.language)) examples = [] for", "tokenization.convert_to_unicode(line[2]) if set_type == \"test\": label = \"entailment\" else: label = tokenization.convert_to_unicode(line[3]) examples.append(", "[]} with os.open(fpath, 'r', encoding='utf-8') as f: for line in f: if skipFirstLine:", "(set_type, i) text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) if set_type == \"test\": label", "(Optional) string. The label of the example. 
This should be specified for train", "= \"0\" else: text_a = tokenization.convert_to_unicode(line[0]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None,", "base class.\"\"\" lines = self._read_tsv(os.path.join(data_dir, \"xnli.dev.tsv\")) examples = [] for (i, line) in", "{'X_A': [], 'X_B': [], 'y': []} with os.open(fpath, 'r', encoding='utf-8') as f: for", "class.\"\"\" return [\"0\", \"1\"] def _create_examples(self, lines, set_type): \"\"\"Creates examples for the training", "return examples class MrpcProcessor(DataProcessor): \"\"\"Processor for the MRPC data set (GLUE version).\"\"\" def", "(i, dict) in enumerate(dicts): guid = \"%s-%s\" % (set_type, str(i)) text_a = tokenization.convert_to_unicode(dict['X_A'])", "\"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[8]) text_b = tokenization.convert_to_unicode(line[9]) if set_type ==", "STSBProcessor(DataProcessor): \"\"\"Processor for the STS-B data set.\"\"\" def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\"", "matched: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")), \"dev_matched\") else: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_mismatched.tsv\")), \"dev_mismatched\") def", "string. The label of the example. This should be specified for train and", "sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized", "The untokenized text of the second sequence. Only must be specified for sequence", "data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'TREC_10.label')), \"test\") def get_labels(self): \"\"\"See base class.\"\"\"", "class MrpcProcessor(DataProcessor): \"\"\"Processor for the MRPC data set (GLUE version).\"\"\" def get_train_examples(self, data_dir):", "\"0\" else: label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class", "License. #################################################### #################################################### # IMPORT STATEMENTS #################################################### # >>>>>> Native Imports <<<<<<< import", "= \"0\" else: label = tokenization.convert_to_unicode(line[0]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples", "label=label)) return examples class DiagnosticProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"", "id for the example. text_a: string. The untokenized text of the first sequence.", "sequence must be specified. text_b: (Optional) string. The untokenized text of the second", "i == 0: continue guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[8])", "tokenization.convert_to_unicode(line[1]) label = \"0\" else: text_a = tokenization.convert_to_unicode(line[0]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid,", "OF ANY KIND, either express or implied. 
# See the License for the", "!= tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1]) examples.append(", "continue text_a = tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a,", "return self._create_examples(data[split_index:], \"dev\") def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'TREC_10.label')), \"test\")", "converters for sequence classification data sets.\"\"\" def get_train_examples(self, data_dir): \"\"\"Gets a collection of", "for this data set.\"\"\" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): \"\"\"Reads a", "= tokenization.convert_to_unicode(line[-3]) text_b = tokenization.convert_to_unicode(line[-2]) label = float(line[-1]) examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return", "label=label)) return examples class MrpcProcessor(DataProcessor): \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"", "if set_type == \"test\": text_a = tokenization.convert_to_unicode(line[1]) label = \"0\" else: text_a =", "line) in enumerate(lines): if i == 0: continue guid = \"dev-%d\" % (i)", "DiagnosticProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\" def get_test_examples(self, data_dir): \"\"\"See", "\"test.tsv\")), \"test\") def get_labels(self): \"\"\"See base class.\"\"\" return [\"entailment\", \"not_entailment\"] def _create_examples(self, lines,", "get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\" data = self.loadFile(os.path.join(data_dir, 'train_5500.label')) split_index = len(data)*0.7 return", "examples = [] for (i, line) in enumerate(lines): # Only the test set", "the QQP data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" return", "\"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) if set_type == \"test\": text_a = tokenization.convert_to_unicode(line[1]) text_b =", "text_a=text_a, text_b=text_b, label=label)) return examples class TRECProcessor(DataProcessor): \"\"\"Processor for the TREC data set", "with tf.gfile.Open(input_file, \"r\") as f: reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar) lines = []", "', 1)[1].split() trec_data['X'].append(sample) trec_data['y'].append(target) return trec_data def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" data", "label=label)) return examples class QNLIProcessor(DataProcessor): \"\"\"Processor for the QQP data set (GLUE version).\"\"\"", "\"\"\"See base class.\"\"\" data = self.loadFile(os.path.join(data_dir, 'train_5500.label')) split_index = len(data)*0.7 return self._create_examples(data[split_index:], \"dev\")", "= tokenization.convert_to_unicode(line[2]) if label == tokenization.convert_to_unicode(\"contradictory\"): label = tokenization.convert_to_unicode(\"contradiction\") examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b,", "= 0.0 else: text_a = tokenization.convert_to_unicode(line[-3]) text_b = tokenization.convert_to_unicode(line[-2]) label = float(line[-1]) examples.append(InputExample(guid=guid,", "text_a = tokenization.convert_to_unicode(line[0]) label = tokenization.convert_to_unicode(line[1]) 
examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples", "data_dir): \"\"\"See base class.\"\"\" lines = self._read_tsv( os.path.join(data_dir, \"multinli\", \"multinli.train.%s.tsv\" % self.language)) examples", "# IMPORT STATEMENTS #################################################### # >>>>>> Native Imports <<<<<<< import os # >>>>>>", "if i == 0: continue guid = \"dev-%d\" % (i) language = tokenization.convert_to_unicode(line[0])", "self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\") def _create_examples(self, lines, set_type): \"\"\"Creates examples for the training", "label=label)) return examples class ColaProcessor(DataProcessor): \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"", "label = tokenization.convert_to_unicode(line[3]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class STSBProcessor(DataProcessor): \"\"\"Processor", "text_a = tokenization.convert_to_unicode(dict['X_A']) text_b = tokenization.convert_to_unicode(dict['X_B']) label = tokenization.convert_to_unicode(dict['y']) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b,", "has a header if set_type == \"test\" and i == 0: continue guid", "or agreed to in writing, software # distributed under the License is distributed", "sick_data['X_B'].append(text[2].split()) sick_data['y'].append(text[4]) return sick_data def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_train.txt')),", "label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class DiagnosticProcessor(DataProcessor): \"\"\"Processor", "# >>>>>> Package Imports <<<<<<< import tensorflow as tf import csv # >>>>>>", "== 0: continue guid = \"%s-%s\" % (set_type, i) if set_type == \"test\":", "tokenization.convert_to_unicode(line[-1]) label = 0.0 else: text_a = tokenization.convert_to_unicode(line[-3]) text_b = tokenization.convert_to_unicode(line[-2]) label =", "= \"%s-%s\" % (set_type, str(i)) text_a = tokenization.convert_to_unicode(dict['X_A']) text_b = tokenization.convert_to_unicode(dict['X_B']) label =", "label=None): \"\"\"Constructs a InputExample. Args: guid: Unique id for the example. text_a: string.", "= \"zh\" def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" lines = self._read_tsv( os.path.join(data_dir, \"multinli\",", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "License. # You may obtain a copy of the License at # #", "'SICK_test_annotated.txt')), \"test\") def get_labels(self): \"\"\"See base class.\"\"\" return ['CONTRADICTION', 'NEUTRAL', 'ENTAILMENT'] def _create_examples(self,", "first sequence. For single sequence tasks, only this sequence must be specified. 
text_b:", "the CoLA data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" return", "os # >>>>>> Package Imports <<<<<<< import tensorflow as tf import csv #", ">>>>>> Native Imports <<<<<<< import os # >>>>>> Package Imports <<<<<<< import tensorflow", "STS-B data set.\"\"\" def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")),", "if set_type == \"test\": label = \"0\" else: label = tokenization.convert_to_unicode(line[0]) examples.append( InputExample(guid=guid,", "continue guid = \"train-%d\" % (i) text_a = tokenization.convert_to_unicode(line[0]) text_b = tokenization.convert_to_unicode(line[1]) label", "def get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\" lines = self._read_tsv(os.path.join(data_dir, \"xnli.dev.tsv\")) examples = []", "2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # Licensed under the Apache License,", "(i) text_a = tokenization.convert_to_unicode(line[0]) text_b = tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[2]) if label ==", "label = \"0\" else: label = tokenization.convert_to_unicode(line[0]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return", "return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\") def get_dev_examples(self, data_dir, matched=True): \"\"\"See base class.\"\"\" if", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "MrpcProcessor(DataProcessor): \"\"\"Processor for the MRPC data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See", "data set (GLUE version).\"\"\" def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir,", "= tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) label = tokenization.convert_to_unicode(line[5]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))", "[], 'X_B': [], 'y': []} with os.open(fpath, 'r', encoding='utf-8') as f: for line", "example. 
This should be specified for train and dev examples, but not for", "\"\"\"Processor for the SICK data set (SentEval version).\"\"\" def loadFile(self, fpath): skipFirstLine =", "== 0: continue guid = \"train-%d\" % (i) text_a = tokenization.convert_to_unicode(line[0]) text_b =", "'NEUTRAL', 'ENTAILMENT'] def _create_examples(self, dicts, set_type): \"\"\"Creates examples for the training and dev", "\"test\") def get_labels(self): \"\"\"See base class.\"\"\" return ['CONTRADICTION', 'NEUTRAL', 'ENTAILMENT'] def _create_examples(self, dicts,", "in enumerate(lines): if i == 0: continue guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0]))", "the XNLI data set.\"\"\" def __init__(self): self.language = \"zh\" def get_train_examples(self, data_dir): \"\"\"See", "if set_type == \"test\": label = \"contradiction\" else: label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid,", "for (i, line) in enumerate(lines): if i == 0: continue guid = \"%s-%s\"", "class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_trial.txt')), \"dev\") def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir,", "License, Version 2.0 (the \"License\"); # you may not use this file except", "skipFirstLine = False else: text = line.strip().split('\\t') sick_data['X_A'].append(text[1].split()) sick_data['X_B'].append(text[2].split()) sick_data['y'].append(text[4]) return sick_data def", "the TREC data set (SentEval version).\"\"\" def loadFile(self, fpath): trec_data = {'X': [],", "InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class ColaProcessor(DataProcessor): \"\"\"Processor for the CoLA data", "get_dev_examples(self, data_dir, matched=True): \"\"\"See base class.\"\"\" if matched: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")), \"dev_matched\")", "set_type == 'test': text_a = tokenization.convert_to_unicode(line[-2]) text_b = tokenization.convert_to_unicode(line[-1]) label = 0.0 else:", "for train and dev examples, but not for test examples. \"\"\" self.guid =", "\"dev_matched.tsv\")), \"dev_matched\") else: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_mismatched.tsv\")), \"dev_mismatched\") def get_test_examples(self, data_dir, matched=True): \"\"\"See", "line in f: target, sample = line.strip().split(':', 1) sample = sample.split(' ', 1)[1].split()", "= tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MrpcProcessor(DataProcessor): \"\"\"Processor for", "\"\"\"See base class.\"\"\" if matched: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")), \"dev_matched\") else: return self._create_examples(", "only this sequence must be specified. text_b: (Optional) string. 
The untokenized text of", "text_a=text_a, text_b=text_b, label=label)) return examples class DiagnosticProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data set", "if set_type == \"test\" and i == 0: continue guid = \"%s-%s\" %", "version).\"\"\" def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"diagnostic.tsv\")), \"test\") def", "os.open(fpath, 'r', encoding='utf-8') as f: for line in f: if skipFirstLine: skipFirstLine =", "= len(data)*0.7 return self._create_examples(data[split_index:], \"dev\") def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir,", "def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" data = self.loadFile(os.path.join(data_dir, 'train_5500.label')) split_index = len(data)*0.7", "def get_labels(self): \"\"\"Gets the list of labels for this data set.\"\"\" raise NotImplementedError()", "\"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\") def get_labels(self): \"\"\"See base class.\"\"\"", "version).\"\"\" def loadFile(self, fpath): skipFirstLine = True sick_data = {'X_A': [], 'X_B': [],", "governing permissions and # limitations under the License. #################################################### #################################################### # IMPORT STATEMENTS", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "= guid self.text_a = text_a self.text_b = text_b self.label = label class DataProcessor(object):", "set.\"\"\" raise NotImplementedError() def get_dev_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for the", "get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\") def _create_examples(self, lines,", "= tokenization.convert_to_unicode(dict['X_B']) label = tokenization.convert_to_unicode(dict['y']) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class", "ColaProcessor(DataProcessor): \"\"\"Processor for the CoLA data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See", "the second sequence. Only must be specified for sequence pair tasks. 
label: (Optional)", "QQP data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(", "= tokenization.convert_to_unicode(dict['X']) label = tokenization.convert_to_unicode(dict['y']) examples.append( InputExample(guid=guid, text_a=text_a, label=label)) return examples #################################################### #", "text_a = tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b,", "a tab separated value file.\"\"\" with tf.gfile.Open(input_file, \"r\") as f: reader = csv.reader(f,", "base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\") def _create_examples(self, lines, set_type): \"\"\"Creates examples", "a header if set_type == \"test\" and i == 0: continue guid =", "\"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\") def get_dev_examples(self, data_dir): \"\"\"See base", "sets.\"\"\" def get_train_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"", "\"dev.tsv\")), \"dev\") def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")", "sick_data = {'X_A': [], 'X_B': [], 'y': []} with os.open(fpath, 'r', encoding='utf-8') as", "enumerate(lines): if i == 0: continue guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) if", "continue guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) if set_type == \"test\": text_a =", "def get_test_examples(self, data_dir, matched=True): \"\"\"See base class.\"\"\" if matched: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")),", "split_index = len(data)*0.7 return self._create_examples(data[split_index:], \"dev\") def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return", "single training/test example for simple sequence classification.\"\"\" def __init__(self, guid, text_a, text_b=None, label=None):", "\"diagnostic.tsv\")), \"test\") def get_labels(self): \"\"\"See base class.\"\"\" return [\"contradiction\", \"entailment\", \"neutral\"] def _create_examples(self,", "for the CoLA data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\"", "= \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) if set_type == 'test': text_a = tokenization.convert_to_unicode(line[-2]) text_b", "0: continue guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) if set_type == 'test': text_a", "f: for line in f: if skipFirstLine: skipFirstLine = False else: text =", "sequence classification.\"\"\" def __init__(self, guid, text_a, text_b=None, label=None): \"\"\"Constructs a InputExample. Args: guid:", "examples class RTEProcessor(DataProcessor): \"\"\"Processor for the RTE data set (GLUE version).\"\"\" def get_train_examples(self,", "enumerate(dicts): guid = \"%s-%s\" % (set_type, str(i)) text_a = tokenization.convert_to_unicode(dict['X_A']) text_b = tokenization.convert_to_unicode(dict['X_B'])", "label = \"0\" else: if len(line) != 6: # there is a problematic", "or implied. 
# See the License for the specific language governing permissions and", "Anne''' class SST2Processor(DataProcessor): \"\"\"Processor for the CoLA data set (GLUE version).\"\"\" def get_train_examples(self,", "tokenization.convert_to_unicode(line[-2]) text_b = tokenization.convert_to_unicode(line[-1]) label = 0.0 else: text_a = tokenization.convert_to_unicode(line[-3]) text_b =", "class RTEProcessor(DataProcessor): \"\"\"Processor for the RTE data set (GLUE version).\"\"\" def get_train_examples(self, data_dir):", "[\"0\", \"1\"] def _create_examples(self, lines, set_type): \"\"\"Creates examples for the training and dev", "base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_test_annotated.txt')), \"test\") def get_labels(self): \"\"\"See base class.\"\"\" return ['CONTRADICTION',", "== \"test\": text_a = tokenization.convert_to_unicode(line[1]) label = \"0\" else: text_a = tokenization.convert_to_unicode(line[3]) label", "= tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) if set_type == \"test\": label = \"entailment\" else:", "single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The", "label = \"0\" else: text_a = tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a,", "text_a = tokenization.convert_to_unicode(line[0]) text_b = tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[2]) if label == tokenization.convert_to_unicode(\"contradictory\"):", "Copyright 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # Licensed under the Apache", "(GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")", "= {'X_A': [], 'X_B': [], 'y': []} with os.open(fpath, 'r', encoding='utf-8') as f:", "of `InputExample`s for the train set.\"\"\" raise NotImplementedError() def get_dev_examples(self, data_dir): \"\"\"Gets a", "def get_labels(self): \"\"\"See base class.\"\"\" return [\"entailment\", \"not_entailment\"] def _create_examples(self, lines, set_type): \"\"\"Creates", "test set has a header if set_type == \"test\" and i == 0:", "label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples '''Added by Anne'''", "i) text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) if set_type == \"test\": label =", "class XnliProcessor(DataProcessor): \"\"\"Processor for the XNLI data set.\"\"\" def __init__(self): self.language = \"zh\"", "= tokenization.convert_to_unicode(line[1]) label = \"0\" else: text_a = tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1]) examples.append(", "get_dev_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\" raise NotImplementedError()", "(set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[8]) text_b = tokenization.convert_to_unicode(line[9]) if set_type == \"test\": label", "labels for this data set.\"\"\" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): \"\"\"Reads", "a collection of `InputExample`s for the train set.\"\"\" raise NotImplementedError() def get_dev_examples(self, data_dir):", "= 
tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) if set_type == \"test\": label = \"0\" else:", "InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class DiagnosticProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data", "import os # >>>>>> Package Imports <<<<<<< import tensorflow as tf import csv", "class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\") def get_dev_examples(self, data_dir, matched=True): \"\"\"See base class.\"\"\"", "get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\") def get_dev_examples(self, data_dir,", "def get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_trial.txt')), \"dev\") def get_test_examples(self, data_dir):", "% (set_type, str(i)) text_a = tokenization.convert_to_unicode(dict['X']) label = tokenization.convert_to_unicode(dict['y']) examples.append( InputExample(guid=guid, text_a=text_a, label=label))", "self.language = \"zh\" def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" lines = self._read_tsv( os.path.join(data_dir,", "tokenization.convert_to_unicode(line[2]) if set_type == \"test\": label = \"0\" else: label = tokenization.convert_to_unicode(line[-1]) examples.append(", "use this file except in compliance with the License. # You may obtain", "\"1\"] def _create_examples(self, lines, set_type): \"\"\"Creates examples for the training and dev sets.\"\"\"", "data set (SentEval version).\"\"\" def loadFile(self, fpath): trec_data = {'X': [], 'y': []}", "[], 'y': []} with os.open(fpath, 'r', encoding='utf-8') as f: for line in f:", "encoding='utf-8') as f: for line in f: if skipFirstLine: skipFirstLine = False else:", "= False else: text = line.strip().split('\\t') sick_data['X_A'].append(text[1].split()) sick_data['X_B'].append(text[2].split()) sick_data['y'].append(text[4]) return sick_data def get_train_examples(self,", "= tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) label = \"0\" else: if len(line) != 6:", "`InputExample`s for prediction.\"\"\" raise NotImplementedError() def get_labels(self): \"\"\"Gets the list of labels for", "return self._create_examples(self.loadFile(os.path.join(data_dir, 'TREC_10.label')), \"test\") def get_labels(self): \"\"\"See base class.\"\"\" return ['ABBR', 'DESC', 'ENTY',", "\"contradiction\" else: label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class", "label=label)) return examples '''Added by Anne''' class SST2Processor(DataProcessor): \"\"\"Processor for the CoLA data", "1)[1].split() trec_data['X'].append(sample) trec_data['y'].append(target) return trec_data def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" data =", "Only the test set has a header if set_type == \"test\" and i", "lines class XnliProcessor(DataProcessor): \"\"\"Processor for the XNLI data set.\"\"\" def __init__(self): self.language =", "\"%s-%s\" % (set_type, i) text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) if set_type ==", "def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\") def 
get_labels(self):", "set_type == \"test\": label = \"entailment\" else: label = tokenization.convert_to_unicode(line[3]) examples.append( InputExample(guid=guid, text_a=text_a,", "def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_test_annotated.txt')), \"test\") def get_labels(self): \"\"\"See", "get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'TREC_10.label')), \"test\") def get_labels(self): \"\"\"See base", "= tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))", "data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\") def _create_examples(self, lines, set_type):", "= \"entailment\" else: label = tokenization.convert_to_unicode(line[3]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples", "return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\") def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(", "by Anne''' class SST2Processor(DataProcessor): \"\"\"Processor for the CoLA data set (GLUE version).\"\"\" def", "Package Imports <<<<<<< import tensorflow as tf import csv # >>>>>> Local Imports", "'''Added by Anne''' class SST2Processor(DataProcessor): \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "tokenization.convert_to_unicode(line[0]) if language != tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label", "in enumerate(lines): if i == 0: continue guid = \"%s-%s\" % (set_type, i)", "raise NotImplementedError() def get_labels(self): \"\"\"Gets the list of labels for this data set.\"\"\"", "= \"train-%d\" % (i) text_a = tokenization.convert_to_unicode(line[0]) text_b = tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[2])", "enumerate(lines): # Only the test set has a header if set_type == \"test\"", "enumerate(lines): if i == 0: continue guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) text_a", "the RTE data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See base class.\"\"\" return", "base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\") def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\"", "return examples class STSBProcessor(DataProcessor): \"\"\"Processor for the STS-B data set.\"\"\" def get_train_examples(self, data_dir):", "return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")), \"dev_matched\") else: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_mismatched.tsv\")), \"dev_mismatched\") def get_test_examples(self,", "= self.loadFile(os.path.join(data_dir, 'train_5500.label')) split_index = len(data)*0.7 return self._create_examples(data[split_index:], \"dev\") def get_test_examples(self, data_dir): \"\"\"See", "tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) if set_type == \"test\": label 
= \"entailment\" else: label", "<NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # Licensed under the Apache License, Version", "\"train\") def get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\" data = self.loadFile(os.path.join(data_dir, 'train_5500.label')) split_index =", "self._create_examples( self._read_tsv(os.path.join(data_dir, \"diagnostic.tsv\")), \"test\") def get_labels(self): \"\"\"See base class.\"\"\" return [\"contradiction\", \"entailment\", \"neutral\"]", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class RTEProcessor(DataProcessor): \"\"\"Processor for the RTE data", "base class.\"\"\" lines = self._read_tsv( os.path.join(data_dir, \"multinli\", \"multinli.train.%s.tsv\" % self.language)) examples = []", "data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\") def get_labels(self): \"\"\"See base", "tokenization.convert_to_unicode(line[5]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QNLIProcessor(DataProcessor): \"\"\"Processor for the", "examples class MrpcProcessor(DataProcessor): \"\"\"Processor for the MRPC data set (GLUE version).\"\"\" def get_train_examples(self,", "trec_data = {'X': [], 'y': []} with os.open(fpath, 'r', encoding='latin-1') as f: for", "examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): \"\"\"See base class.\"\"\" return", "dev sets.\"\"\" examples = [] for (i, dict) in enumerate(dicts): guid = \"%s-%s\"", "= line.strip().split(':', 1) sample = sample.split(' ', 1)[1].split() trec_data['X'].append(sample) trec_data['y'].append(target) return trec_data def", "f: if skipFirstLine: skipFirstLine = False else: text = line.strip().split('\\t') sick_data['X_A'].append(text[1].split()) sick_data['X_B'].append(text[2].split()) sick_data['y'].append(text[4])", "return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test\") else: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_mismatched.tsv\")), \"test\") def get_labels(self):", "NotImplementedError() def get_dev_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"", "self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")), \"dev_matched\") else: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"dev_mismatched.tsv\")), \"dev_mismatched\") def get_test_examples(self, data_dir,", "class SICKEntailmentProcessor(DataProcessor): \"\"\"Processor for the SICK data set (SentEval version).\"\"\" def loadFile(self, fpath):", "MnliProcessor(DataProcessor): \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See", "tokenization.convert_to_unicode(line[4]) if set_type == \"test\": label = \"0\" else: label = tokenization.convert_to_unicode(line[0]) examples.append(", "reader: lines.append(line) return lines class XnliProcessor(DataProcessor): \"\"\"Processor for the XNLI data set.\"\"\" def", "enumerate(lines): if i == 0: continue guid = \"%s-%s\" % (set_type, i) if", "else: text_a = tokenization.convert_to_unicode(line[0]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return", "with the 
License. # You may obtain a copy of the License at", "label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): \"\"\"See", "if set_type == 'test': text_a = tokenization.convert_to_unicode(line[-2]) text_b = tokenization.convert_to_unicode(line[-1]) label = 0.0", "= tokenization.convert_to_unicode(line[-2]) text_b = tokenization.convert_to_unicode(line[-1]) label = 0.0 else: text_a = tokenization.convert_to_unicode(line[-3]) text_b", "law or agreed to in writing, software # distributed under the License is", "= True sick_data = {'X_A': [], 'X_B': [], 'y': []} with os.open(fpath, 'r',", "the License. #################################################### #################################################### # IMPORT STATEMENTS #################################################### # >>>>>> Native Imports <<<<<<<", "InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class SICKEntailmentProcessor(DataProcessor): \"\"\"Processor for the SICK data", "data_dir): \"\"\"See base class.\"\"\" return self._create_examples( self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\") def get_dev_examples(self, data_dir): \"\"\"See", "text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) label = tokenization.convert_to_unicode(line[5]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b,", "\"test\") else: return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test_mismatched.tsv\")), \"test\") def get_labels(self): \"\"\"See base class.\"\"\" return", "examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class QQPProcessor(DataProcessor): \"\"\"Processor for the QQP", "Only must be specified for sequence pair tasks. label: (Optional) string. The label", "== 0: continue guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) if set_type == \"test\":", "in compliance with the License. 
# You may obtain a copy of the", "= tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "= label class DataProcessor(object): \"\"\"Base class for data converters for sequence classification data", "text_a=text_a, text_b=text_b, label=label)) return examples class STSBProcessor(DataProcessor): \"\"\"Processor for the STS-B data set.\"\"\"", "sets.\"\"\" examples = [] for (i, line) in enumerate(lines): # Only the test", "examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class DiagnosticProcessor(DataProcessor): \"\"\"Processor for the MultiNLI", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "get_labels(self): \"\"\"See base class.\"\"\" return [\"entailment\", \"not_entailment\"] def _create_examples(self, lines, set_type): \"\"\"Creates examples", "= \"0\" else: if len(line) != 6: # there is a problematic line", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "= tokenization.convert_to_unicode(line[3]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class SICKEntailmentProcessor(DataProcessor): \"\"\"Processor for", "# limitations under the License. #################################################### #################################################### # IMPORT STATEMENTS #################################################### # >>>>>>", "'train_5500.label')) split_index = len(data)*0.7 return self._create_examples(data[split_index:], \"dev\") def get_test_examples(self, data_dir): \"\"\"See base class.\"\"\"", "#################################################### class InputExample(object): \"\"\"A single training/test example for simple sequence classification.\"\"\" def __init__(self,", "return ['ABBR', 'DESC', 'ENTY', 'HUM', 'LOC', 'NUM'] def _create_examples(self, dicts, set_type): \"\"\"Creates examples", "= tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) if set_type == \"test\": label = \"0\" else:", "Unique id for the example. text_a: string. The untokenized text of the first", "for line in f: if skipFirstLine: skipFirstLine = False else: text = line.strip().split('\\t')", "permissions and # limitations under the License. #################################################### #################################################### # IMPORT STATEMENTS ####################################################", "\"entailment\" else: label = tokenization.convert_to_unicode(line[3]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class", "delimiter=\"\\t\", quotechar=quotechar) lines = [] for line in reader: lines.append(line) return lines class", "guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) if set_type == 'test': text_a = tokenization.convert_to_unicode(line[-2])", "must be specified for sequence pair tasks. label: (Optional) string. The label of", "of the second sequence. Only must be specified for sequence pair tasks. 
label:", "tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class WNLIProcessor(DataProcessor): \"\"\"Processor for the", "data_dir): \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\" raise NotImplementedError() def", "enumerate(lines): if i == 0: continue guid = \"%s-%s\" % (set_type, i) text_a", "return self._create_examples( self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\") def _create_examples(self, lines, set_type): \"\"\"Creates examples for the", "= tokenization.convert_to_unicode(line[2]) if set_type == \"test\": label = \"entailment\" else: label = tokenization.convert_to_unicode(line[3])", "\"\"\"See base class.\"\"\" return [\"0\", \"1\"] def _create_examples(self, lines, set_type): \"\"\"Creates examples for", "sequence pair tasks. label: (Optional) string. The label of the example. This should", "tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text", "guid: Unique id for the example. text_a: string. The untokenized text of the", "self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_train.txt')), \"train\") def get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_trial.txt')), \"dev\")", "\"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\" def get_test_examples(self, data_dir): \"\"\"See base", "\"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\" raise NotImplementedError() def get_test_examples(self,", "= tokenization.convert_to_unicode(line[-2]) label = float(line[-1]) examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class RTEProcessor(DataProcessor):", "text_b = tokenization.convert_to_unicode(line[9]) if set_type == \"test\": label = \"contradiction\" else: label =", "'r', encoding='latin-1') as f: for line in f: target, sample = line.strip().split(':', 1)", "data_dir): \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\" raise NotImplementedError() def", "= csv.reader(f, delimiter=\"\\t\", quotechar=quotechar) lines = [] for line in reader: lines.append(line) return", "% (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) if set_type == \"test\":", "= \"0\" else: text_a = tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None,", "\"\"\"Processor for the MRPC data set (GLUE version).\"\"\" def get_train_examples(self, data_dir): \"\"\"See base", "line) in enumerate(lines): # Only the test set has a header if set_type", "training and dev sets.\"\"\" examples = [] for (i, line) in enumerate(lines): if", "text_b=text_b, label=label)) return examples def get_labels(self): \"\"\"See base class.\"\"\" return [\"contradiction\", \"entailment\", \"neutral\"]", "\"\"\"Processor for the TREC data set (SentEval version).\"\"\" def loadFile(self, fpath): trec_data =", "raise NotImplementedError() def get_test_examples(self, data_dir): \"\"\"Gets a collection of `InputExample`s for prediction.\"\"\" raise", "return lines class XnliProcessor(DataProcessor): \"\"\"Processor for the XNLI data set.\"\"\" def __init__(self): self.language", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "return examples class 
QQPProcessor(DataProcessor): \"\"\"Processor for the QQP data set (GLUE version).\"\"\" def", "\"0\" else: text_a = tokenization.convert_to_unicode(line[0]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label))", "(set_type, tokenization.convert_to_unicode(line[0])) if set_type == \"test\": text_a = tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) label", "0: continue guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[8]) text_b =", "guid = \"train-%d\" % (i) text_a = tokenization.convert_to_unicode(line[0]) text_b = tokenization.convert_to_unicode(line[1]) label =", "= self.loadFile(os.path.join(data_dir, 'train_5500.label')) split_index = len(data)*0.7 return self._create_examples(data[:split_index], \"train\") def get_dev_examples(self, data_dir): \"\"\"See", "if set_type == \"test\": label = \"entailment\" else: label = tokenization.convert_to_unicode(line[3]) examples.append( InputExample(guid=guid,", "return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_train.txt')), \"train\") def get_dev_examples(self, data_dir): \"\"\"See base class.\"\"\" return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_trial.txt')),", "(i, line) in enumerate(lines): if i == 0: continue guid = \"train-%d\" %", "= tokenization.convert_to_unicode(dict['y']) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class TRECProcessor(DataProcessor): \"\"\"Processor for", "continue guid = \"dev-%d\" % (i) language = tokenization.convert_to_unicode(line[0]) if language != tokenization.convert_to_unicode(self.language):", "class SST2Processor(DataProcessor): \"\"\"Processor for the CoLA data set (GLUE version).\"\"\" def get_train_examples(self, data_dir):", "text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): \"\"\"See base class.\"\"\" return [\"contradiction\", \"entailment\",", "this file except in compliance with the License. # You may obtain a", "tokenization.convert_to_unicode(line[3]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class SICKEntailmentProcessor(DataProcessor): \"\"\"Processor for the", "for the specific language governing permissions and # limitations under the License. ####################################################", "False else: text = line.strip().split('\\t') sick_data['X_A'].append(text[1].split()) sick_data['X_B'].append(text[2].split()) sick_data['y'].append(text[4]) return sick_data def get_train_examples(self, data_dir):", "[] for (i, line) in enumerate(lines): if i == 0: continue guid =", "text_a = tokenization.convert_to_unicode(line[1]) text_b = tokenization.convert_to_unicode(line[2]) if set_type == \"test\": label = \"0\"", "0: continue guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[1]) text_b =", "pair tasks. label: (Optional) string. The label of the example. 
… (the remainder of this record's ngram list: overlapping word n-grams spanning a BERT/GLUE data-preparation module, including its Apache-2.0 license header, the imports of os, csv, tensorflow, and tokenization from retrograph.models, the InputExample and DataProcessor base classes, and the Xnli, Mnli, Mrpc, Cola, SST2, QQP, QNLI, RTE, WNLI, Diagnostic, SICKEntailment, TREC, and STS-B processors) ]
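For orientation, here is a minimal sketch of how overlapping word n-grams like the entries in these lists could be produced from a source file with a fixed-size sliding window. The function name, window size, and sample string are illustrative assumptions; they are not taken from this dataset.

# Illustrative sketch (assumption): fixed-size sliding window over whitespace tokens.
# The window size, helper name, and sample text below are hypothetical.
def sliding_word_ngrams(text, n=13):
    tokens = text.split()
    if not tokens:
        return []
    if len(tokens) <= n:
        return [" ".join(tokens)]
    return [" ".join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]

if __name__ == "__main__":
    sample = 'class DataProcessor(object): """Base class for data converters for sequence classification data sets."""'
    for gram in sliding_word_ngrams(sample, n=5):
        print(gram)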
[ "new_row['sentiment_actual_previous'] = 1 if ((price_minus1.iloc[0]['Closing Price'] - price_minus1.iloc[0]['Opening Price']) >= 0) else -1", "import WordNetLemmatizer class LoadData: @classmethod def preprocess_stocktwits_data(cls, file_location, columns=['datetime', 'message']): \"\"\" preprocess the", "priceFrame as (Date, Opening Price, Closing Price, Volume) and return a combined frame", "2) sort according to datetime in descending order (newest first) 3) remove links,", "by date and sentiment and priceFrame as (Date, Opening Price, Closing Price, Volume)", "= 'data-extractor/stocktwits_GOOGL_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location) dataGOOGL = LoadData.get_stocktwits_data('GOOGL') dataGOOGL['sentiment']", "dataFrame.sort_values(by='datetime', ascending=False) else: dataFrame = pd.read_csv(file_location, usecols=columns) dataFrame['message'] = dataFrame['message'].apply(lambda x: html.unescape(x)) dataFrame['message']", "AMZN_test.to_csv('data-extractor/stockdata_AMZN_test.csv', index=False) dataGOOGL.sort_values('date') dataGOOGL.drop(columns='date', inplace=True) GOOGL_training, GOOGL_test = np.split(dataGOOGL.sample(frac=1), [int(.9*len(dataGOOGL))]) GOOGL_training.to_csv('data-extractor/stockdata_GOOGL_training.csv', index=False) GOOGL_test.to_csv('data-extractor/stockdata_GOOGL_test.csv',", "if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location) dataAAPL = LoadData.get_stocktwits_data('AAPL') dataAAPL['sentiment'] = dataAAPL['message'].apply(lambda", "pd.read_csv('data-extractor/stocktwits_AMZN_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) return dataAAPL, dataAMZN, dataGOOGL @classmethod", "'symbol' from data-extractor and returns a pandas dataframe with columns [Date(datetime64[ns]), Opening Price(float64),", "= df['message'].sum() - tweet_minus1['message'].sum() new_row['cash_volume'] = price_current['Volume'].iloc[0] new_row['label'] = 1 if ((price_plus1.iloc[0]['Closing Price']", "return dataFrame @classmethod def get_price_data(cls, symbol): \"\"\" loads the price data of 'symbol'", "order (newest first) 3) remove links, @ and $ references, extra whitespaces, extra", "next nth day returns dataframes for AAPL, AMZN, GOOGL respectively \"\"\" if not", "from the two lists before saving \"\"\" dataFrame = LoadData.get_labelled_data() bullish_keywords = set()", "if price_current.empty or date-timedelta(days=1) not in sentimentFrame.index: continue tweet_minus1 = sentimentFrame.loc[date-timedelta(days=1)] days =", "saves it as a csv file (appending '_preprocessed' before '.csv). 
The preprocessing us", "price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)] while price_minus1.empty: days += 1 price_minus1 = priceFrame[priceFrame['Date']", "as file: for word in updated_bullish_keywords: file.write(word+\"\\n\") with open('data-extractor/lexicon_bearish_words.txt', 'a') as file: for", "\"\"\" get_data loads the preprocessed data of 'symbol' from data-extractor and returns a", "tweet_minus1['message'].sum() new_row['cash_volume'] = price_current['Volume'].iloc[0] new_row['label'] = 1 if ((price_plus1.iloc[0]['Closing Price'] - price_current.iloc[0]['Closing Price'])", "index, row in dataFrame.iterrows(): tokens = word_tokenize(row['message']) pos = pos_tag(tokens) selected_tags = set()", "Price', 'Volume'], parse_dates=['Date'], infer_datetime_format=True) return dataFrame @classmethod def get_labelled_data(cls, type='complete'): \"\"\" get_labelled_data loads", "if not os.path.isfile(file_location): import numpy as np dataAAPL, dataAMZN, dataGOOGL = LoadData.aggregate_stock_price_data() combined_data", "@classmethod def labelled_data_lexicon_analysis(cls): \"\"\" extract keywords from labelled stocktwits data for improved accuracy", "a pandas dataframe with columns [Date(datetime64[ns]), Opening Price(float64), Closing Price(float64), Volume(float64)]. \"\"\" file_location", "tweet_classifier.predict([x])[0]) dataGOOGL['datetime'] = dataGOOGL['datetime'].apply(lambda x: x.date()) dataGOOGL.rename(columns={'datetime':'date'}, inplace=True) dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', index=False) dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_withsentiment.csv',", "= pd.read_csv(file_location, usecols=['Date', 'Opening Price', 'Closing Price', 'Volume'], parse_dates=['Date'], infer_datetime_format=True) return dataFrame @classmethod", "= set(stopwords.words('english')) for index, row in dataFrame.iterrows(): tokens = word_tokenize(row['message']) pos = pos_tag(tokens)", "'Bearish': bearish_keywords = bearish_keywords.union(selected_tags) updated_bullish_keywords = bullish_keywords - bearish_keywords updated_bearish_keywords = bearish_keywords -", "following form (date, sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label) we have choice to", "bearish and bullish words respectively \"\"\" file_location1 = 'data-extractor/lexicon_bearish_words.txt' file_location2 = 'data-extractor/lexicon_bullish_words.txt' if", "label of next nth day returns dataframes for AAPL, AMZN, GOOGL respectively \"\"\"", "extract keywords from labelled stocktwits data for improved accuracy in scoring for each", "remove stop words else ignore the word remove intersections from the two lists", "convert everything to lower case \"\"\" if 'datetime' in columns: dataFrame = pd.read_csv(file_location,", "{} new_row['date'] = date new_row['sentiment_calculated_bullish'] = df.loc[(date, 'Bullish')]['message'] new_row['sentiment_calculated_bearish'] = df.loc[(date, 'Bearish')]['message'] new_row['sentiment_actual_previous']", "dataAAPL.groupby(['date','sentiment'], sort=False).count() dataAMZN = dataAMZN.groupby(['date','sentiment'], sort=False).count() dataGOOGL = dataGOOGL.groupby(['date','sentiment'], sort=False).count() dataAAPL = LoadData.combine_price_and_sentiment(dataAAPL,", "== 'complete': file_location = 'data-extractor/labelled_data_complete_preprocessed.csv' if os.path.isfile(file_location) is False: 
LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_complete.csv', columns=['sentiment', 'message']) elif", "priceFrame[priceFrame['Date'] == date-timedelta(days=days)] new_row = {} new_row['date'] = date new_row['sentiment_calculated_bullish'] = df.loc[(date, 'Bullish')]['message']", "if 'datetime' in columns: dataFrame = pd.read_csv(file_location, usecols=columns, parse_dates=['datetime'], infer_datetime_format=True) dataFrame.sort_values(by='datetime', ascending=False) else:", "days += 1 price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)] new_row = {} new_row['date'] =", "dataAAPL['datetime'] = dataAAPL['datetime'].apply(lambda x: x.date()) dataAAPL.rename(columns={'datetime':'date'}, inplace=True) dataAAPL.to_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', index=False) sentimented_file = 'data-extractor/stocktwits_AMZN_withsentiment.csv' if", "def aggregate_stock_price_data(cls): \"\"\" compile stocktwits data for stock prediction analysis in the following", "returns dataframes for AAPL, AMZN, GOOGL respectively \"\"\" if not (os.path.isfile('data-extractor/stocktwits_AAPL_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_AMZN_sharedata.csv')", "ignore the word remove intersections from the two lists before saving \"\"\" dataFrame", "x: re.sub(r'\\,+', ', ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\?+', '? ', x))", "is False: LoadData.preprocess_stocktwits_data('data-extractor/stocktwits_'+symbol+'.csv') dataFrame = pd.read_csv(file_location) return dataFrame @classmethod def get_price_data(cls, symbol): \"\"\"", "x.date()) dataAAPL.rename(columns={'datetime':'date'}, inplace=True) dataAAPL.to_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', index=False) sentimented_file = 'data-extractor/stocktwits_AMZN_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier", "if row['sentiment'] == 'Bullish': bullish_keywords = bullish_keywords.union(selected_tags) elif row['sentiment'] == 'Bearish': bearish_keywords =", "'data-extractor/lexicon_bearish_words.txt' file_location2 = 'data-extractor/lexicon_bullish_words.txt' if os.path.isfile(file_location1) is False or os.path.isfile(file_location2) is False: LoadData.labelled_data_lexicon_analysis()", "Opening Price, Closing Price, Volume) and return a combined frame as (sentiment_calculated_bullish, sentiment_calculated_bearish,", "lemmatizer = WordNetLemmatizer() stop_words = set(stopwords.words('english')) for index, row in dataFrame.iterrows(): tokens =", "sentimentFrame, priceFrame): from datetime import timedelta \"\"\" receive sentimentFrame as (date, sentiment, message)", "using label of next nth day returns dataframes for AAPL, AMZN, GOOGL respectively", "dataAAPL = LoadData.get_stocktwits_data('AAPL') dataAAPL['sentiment'] = dataAAPL['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataAAPL['datetime'] = dataAAPL['datetime'].apply(lambda x: x.date())", "index=False) AAPL_test.to_csv('data-extractor/stockdata_AAPL_test.csv', index=False) dataAMZN.sort_values('date') dataAMZN.drop(columns='date', inplace=True) AMZN_training, AMZN_test = np.split(dataAMZN.sample(frac=1), [int(.9*len(dataAMZN))]) AMZN_training.to_csv('data-extractor/stockdata_AMZN_training.csv', index=False)", "label) Standardize the data before using. 
\"\"\" file_location = 'data-extractor/stockdata_'+symbol+'_'+type+'.csv' if not os.path.isfile(file_location):", "GOOGL_test = np.split(dataGOOGL.sample(frac=1), [int(.9*len(dataGOOGL))]) GOOGL_training.to_csv('data-extractor/stockdata_GOOGL_training.csv', index=False) GOOGL_test.to_csv('data-extractor/stockdata_GOOGL_test.csv', index=False) data = pd.read_csv(file_location) return data", "== date] if price_current.empty or date-timedelta(days=1) not in sentimentFrame.index: continue tweet_minus1 = sentimentFrame.loc[date-timedelta(days=1)]", "bearish and bullish messages \"\"\" try: os.remove('data-extractor/labelled_data_training.csv') except OSError: pass dataFrame = LoadData.get_labelled_data(type='complete')", "before using. \"\"\" file_location = 'data-extractor/stockdata_'+symbol+'_'+type+'.csv' if not os.path.isfile(file_location): import numpy as np", "'Bullish')]['message'] new_row['sentiment_calculated_bearish'] = df.loc[(date, 'Bearish')]['message'] new_row['sentiment_actual_previous'] = 1 if ((price_minus1.iloc[0]['Closing Price'] - price_minus1.iloc[0]['Opening", "label) we have choice to take previous n days sentiment_calculated and using label", "returns a pandas dataframe with columns [Date(datetime64[ns]), Opening Price(float64), Closing Price(float64), Volume(float64)]. \"\"\"", "columns=['sentiment', 'message']) dataFrame = pd.read_csv(file_location) return dataFrame @classmethod def get_custom_lexicon(cls): \"\"\" get custom", "and datetime columns. 2) sort according to datetime in descending order (newest first)", "'Bullish': bullish_keywords = bullish_keywords.union(selected_tags) elif row['sentiment'] == 'Bearish': bearish_keywords = bearish_keywords.union(selected_tags) updated_bullish_keywords =", "= dataAAPL['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataAAPL['datetime'] = dataAAPL['datetime'].apply(lambda x: x.date()) dataAAPL.rename(columns={'datetime':'date'}, inplace=True) dataAAPL.to_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', index=False)", "$ references, extra whitespaces, extra '.', digits, slashes, hyphons 4) decode html entities", "LoadData.preprocess_stocktwits_data('data-extractor/stocktwits_'+symbol+'.csv') dataFrame = pd.read_csv(file_location) return dataFrame @classmethod def get_price_data(cls, symbol): \"\"\" loads the", "False: LoadData.preprocess_stocktwits_data('data-extractor/stocktwits_'+symbol+'.csv') dataFrame = pd.read_csv(file_location) return dataFrame @classmethod def get_price_data(cls, symbol): \"\"\" loads", "(os.path.isfile('data-extractor/stocktwits_AAPL_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_AMZN_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_GOOGL_sharedata.csv')): from sklearn.externals import joblib file_location = 'naive_bayes_classifier.pkl' priceAAPL", "dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', index=False) dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) dataGOOGL", "= dataFrame.append(new_row, ignore_index=True) return dataFrame @classmethod def aggregate_stock_price_data(cls): \"\"\" compile stocktwits data for", "priceAMZN = LoadData.get_price_data('AMZN') priceGOOGL = LoadData.get_price_data('GOOGL') sentimented_file = 'data-extractor/stocktwits_AAPL_withsentiment.csv' if os.path.isfile(sentimented_file) is False:", "file location and 
saves it as a csv file (appending '_preprocessed' before '.csv).", "'v')) elif pos[i][1].startswith('N'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'n')) elif pos[i][1].startswith('R'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'r')) selected_tags -= stop_words if", "\"\"\" file_location1 = 'data-extractor/lexicon_bearish_words.txt' file_location2 = 'data-extractor/lexicon_bullish_words.txt' if os.path.isfile(file_location1) is False or os.path.isfile(file_location2)", "= pd.read_csv(file_location2, header=None, names=['word']) return dataFrameBearish, dataFrameBullish @classmethod def get_training_data(cls): \"\"\" get labelled", "@classmethod def get_training_data(cls): \"\"\" get labelled training data with equal bearish and bullish", "date] if price_current.empty or date-timedelta(days=1) not in sentimentFrame.index: continue tweet_minus1 = sentimentFrame.loc[date-timedelta(days=1)] days", "', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\s+', ' ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda", "dataFrame @classmethod def get_price_data(cls, symbol): \"\"\" loads the price data of 'symbol' from", "for stock prediction analysis in the following form (date, sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change,", "bullish_keywords.union(selected_tags) elif row['sentiment'] == 'Bearish': bearish_keywords = bearish_keywords.union(selected_tags) updated_bullish_keywords = bullish_keywords - bearish_keywords", "dataFrame['message'].apply(lambda x: re.sub(r'\\s+', ' ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: x.lower()) dataFrame.to_csv(file_location[:-4]+'_preprocessed.csv', index=False)", "pd.read_csv(file_location) return dataFrame @classmethod def get_price_data(cls, symbol): \"\"\" loads the price data of", "\"\"\" handle preprocessing and loading of data. \"\"\" import html import os.path import", "@classmethod def get_labelled_data(cls, type='complete'): \"\"\" get_labelled_data loads the preprocessed labelled data of stocktwits", "the following form (date, sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label) we have choice", "cash_volume, label) we have choice to take previous n days sentiment_calculated and using", "in descending order (newest first) 3) remove links, @ and $ references, extra", "columns=['datetime', 'message']): \"\"\" preprocess the data in file location and saves it as", "'data-extractor/labelled_data_training_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.get_training_data() elif type == 'test': file_location = 'data-extractor/labelled_data_test_preprocessed.csv'", "data-extractor and returns a pandas dataframe with columns [sentiment(object), message(object)]. \"\"\" if type", "it as a csv file (appending '_preprocessed' before '.csv). The preprocessing us in", "sort=False).count() dataAAPL = LoadData.combine_price_and_sentiment(dataAAPL, priceAAPL) dataAMZN = LoadData.combine_price_and_sentiment(dataAMZN, priceAMZN) dataGOOGL = LoadData.combine_price_and_sentiment(dataGOOGL, priceGOOGL)", "dataFrameBearish = pd.read_csv(file_location1, header=None, names=['word']) dataFrameBullish = pd.read_csv(file_location2, header=None, names=['word']) return dataFrameBearish, dataFrameBullish", "extract message and datetime columns. 2) sort according to datetime in descending order", "datetime(datetime64[ns])]. 
\"\"\" file_location = 'data-extractor/stocktwits_'+symbol+'_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.preprocess_stocktwits_data('data-extractor/stocktwits_'+symbol+'.csv') dataFrame = pd.read_csv(file_location)", "dataFrame @classmethod def get_labelled_data(cls, type='complete'): \"\"\" get_labelled_data loads the preprocessed labelled data of", "dataFrameBearish dataFrameBullishTraining = dataFrameBullish[:len(dataFrameBearish)] dataFrameTraining = dataFrameBearishTraining.append(dataFrameBullishTraining, ignore_index=True).sample(frac=1).reset_index(drop=True) dataFrameTraining.to_csv('data-extractor/labelled_data_training_preprocessed.csv', index=False) @classmethod def combine_price_and_sentiment(cls,", "pd.read_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True) dataAAPL = dataAAPL.groupby(['date','sentiment'], sort=False).count() dataAMZN = dataAMZN.groupby(['date','sentiment'], sort=False).count() dataGOOGL =", "from data-extractor and returns a pandas dataframe with columns [Date(datetime64[ns]), Opening Price(float64), Closing", "to take previous n days sentiment_calculated and using label of next nth day", "file: for word in updated_bearish_keywords: file.write(word+\"\\n\") @classmethod def get_stocktwits_data(cls, symbol): \"\"\" get_data loads", "dataFrameBearish = dataFrame[dataFrame['sentiment']=='Bearish'] dataFrameBullish = dataFrame[dataFrame['sentiment']=='Bullish'] dataFrameBearishTraining = dataFrameBearish dataFrameBullishTraining = dataFrameBullish[:len(dataFrameBearish)] dataFrameTraining", "n days sentiment_calculated and using label of next nth day returns dataframes for", "nltk import word_tokenize, pos_tag from nltk.corpus import stopwords, wordnet from nltk.stem.wordnet import WordNetLemmatizer", "(appending '_preprocessed' before '.csv). The preprocessing us in following ways: 1) extract message", "Closing Price(float64), Volume(float64)]. 
\"\"\" file_location = 'data-extractor/stock_prices_'+symbol+'.csv' dataFrame = pd.read_csv(file_location, usecols=['Date', 'Opening Price',", "combine_price_and_sentiment(cls, sentimentFrame, priceFrame): from datetime import timedelta \"\"\" receive sentimentFrame as (date, sentiment,", "priceFrame[priceFrame['Date'] == date] if price_current.empty or date-timedelta(days=1) not in sentimentFrame.index: continue tweet_minus1 =", "elif type == 'training': file_location = 'data-extractor/labelled_data_training_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.get_training_data() elif", "days += 1 price_plus1 = priceFrame[priceFrame['Date'] == date+timedelta(days=days)] days = 1 price_minus1 =", "LoadData.get_stocktwits_data('AMZN') dataAMZN['sentiment'] = dataAMZN['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataAMZN['datetime'] = dataAMZN['datetime'].apply(lambda x: x.date()) dataAMZN.rename(columns={'datetime':'date'}, inplace=True)", "bullish messages \"\"\" try: os.remove('data-extractor/labelled_data_training.csv') except OSError: pass dataFrame = LoadData.get_labelled_data(type='complete') dataFrameBearish =", "= dataAMZN['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataAMZN['datetime'] = dataAMZN['datetime'].apply(lambda x: x.date()) dataAMZN.rename(columns={'datetime':'date'}, inplace=True) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', index=False)", "dataGOOGL @classmethod def get_stock_prediction_data(cls, symbol='ALL', type='training'): \"\"\" get the training and test data", "combined_training, combined_test = np.split(combined_data.sample(frac=1), [int(.9*len(combined_data))]) combined_training.to_csv('data-extractor/stockdata_ALL_training.csv', index=False) combined_test.to_csv('data-extractor/stockdata_ALL_test.csv', index=False) dataAAPL.sort_values('date') dataAAPL.drop(columns='date', inplace=True) AAPL_training,", "the data in file location and saves it as a csv file (appending", "dataFrameBearishTraining.append(dataFrameBullishTraining, ignore_index=True).sample(frac=1).reset_index(drop=True) dataFrameTraining.to_csv('data-extractor/labelled_data_training_preprocessed.csv', index=False) @classmethod def combine_price_and_sentiment(cls, sentimentFrame, priceFrame): from datetime import timedelta", "dataAAPL.drop(columns='date', inplace=True) AAPL_training, AAPL_test = np.split(dataAAPL.sample(frac=1), [int(.9*len(dataAAPL))]) AAPL_training.to_csv('data-extractor/stockdata_AAPL_training.csv', index=False) AAPL_test.to_csv('data-extractor/stockdata_AAPL_test.csv', index=False) dataAMZN.sort_values('date') dataAMZN.drop(columns='date',", "'a') as file: for word in updated_bearish_keywords: file.write(word+\"\\n\") @classmethod def get_stocktwits_data(cls, symbol): \"\"\"", "dataFrame @classmethod def get_custom_lexicon(cls): \"\"\" get custom lexicon of bearish and bullish words", "references, extra whitespaces, extra '.', digits, slashes, hyphons 4) decode html entities 5)", "digits, slashes, hyphons 4) decode html entities 5) convert everything to lower case", "LoadData.get_labelled_data(type='complete') dataFrameBearish = dataFrame[dataFrame['sentiment']=='Bearish'] dataFrameBullish = dataFrame[dataFrame['sentiment']=='Bullish'] dataFrameBearishTraining = dataFrameBearish dataFrameBullishTraining = dataFrameBullish[:len(dataFrameBearish)]", "days sentiment_calculated and using label of next nth day returns dataframes for AAPL,", "= LoadData.get_stocktwits_data('AAPL') dataAAPL['sentiment'] = 
dataAAPL['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataAAPL['datetime'] = dataAAPL['datetime'].apply(lambda x: x.date()) dataAAPL.rename(columns={'datetime':'date'},", "\"\"\" compile stocktwits data for stock prediction analysis in the following form (date,", "\"\"\" extract keywords from labelled stocktwits data for improved accuracy in scoring for", "POS tagging 3) if a sense is present in wordnet then, lemmatize the", "with open('data-extractor/lexicon_bullish_words.txt', 'a') as file: for word in updated_bullish_keywords: file.write(word+\"\\n\") with open('data-extractor/lexicon_bearish_words.txt', 'a')", "'symbol' from data-extractor and returns a pandas dataframe with columns [message(object), datetime(datetime64[ns])]. \"\"\"", "= priceFrame[priceFrame['Date'] == date-timedelta(days=days)] while price_minus1.empty: days += 1 price_minus1 = priceFrame[priceFrame['Date'] ==", "dataAAPL.to_csv('data-extractor/stocktwits_AAPL_sharedata.csv', index=False) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_sharedata.csv', index=False) dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', index=False) dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) dataAMZN =", "labelled_data_lexicon_analysis(cls): \"\"\" extract keywords from labelled stocktwits data for improved accuracy in scoring", "(sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label) Standardize the data before using. \"\"\" file_location", "- tweet_minus1['message'].sum() new_row['cash_volume'] = price_current['Volume'].iloc[0] new_row['label'] = 1 if ((price_plus1.iloc[0]['Closing Price'] - price_current.iloc[0]['Closing", "sentiment_actual_previous, tweet_volume_change, cash_volume, label) we have choice to take previous n days sentiment_calculated", "and using label of next nth day returns dataframes for AAPL, AMZN, GOOGL", "price_minus1.empty: days += 1 price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)] new_row = {} new_row['date']", "set() for i in range(len(pos)): if len(wordnet.synsets(pos[i][0])): if pos[i][1].startswith('J'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'a')) elif pos[i][1].startswith('V'):", "custom lexicon of bearish and bullish words respectively \"\"\" file_location1 = 'data-extractor/lexicon_bearish_words.txt' file_location2", "respectively \"\"\" file_location1 = 'data-extractor/lexicon_bearish_words.txt' file_location2 = 'data-extractor/lexicon_bullish_words.txt' if os.path.isfile(file_location1) is False or", "- bearish_keywords updated_bearish_keywords = bearish_keywords - bullish_keywords with open('data-extractor/lexicon_bullish_words.txt', 'a') as file: for", "return dataFrame @classmethod def get_custom_lexicon(cls): \"\"\" get custom lexicon of bearish and bullish", "in file location and saves it as a csv file (appending '_preprocessed' before", "False: tweet_classifier = joblib.load(file_location) dataAAPL = LoadData.get_stocktwits_data('AAPL') dataAAPL['sentiment'] = dataAAPL['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataAAPL['datetime']", "os.path.isfile(file_location) is False: LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_complete.csv', columns=['sentiment', 'message']) elif type == 'training': file_location = 'data-extractor/labelled_data_training_preprocessed.csv'", "improved accuracy in scoring for each labelled message do 1) tokenize the 
message", "stop_words if row['sentiment'] == 'Bullish': bullish_keywords = bullish_keywords.union(selected_tags) elif row['sentiment'] == 'Bearish': bearish_keywords", "updated_bearish_keywords: file.write(word+\"\\n\") @classmethod def get_stocktwits_data(cls, symbol): \"\"\" get_data loads the preprocessed data of", "the word and remove stop words else ignore the word remove intersections from", "combined_data.drop(columns='date', inplace=True) combined_training, combined_test = np.split(combined_data.sample(frac=1), [int(.9*len(combined_data))]) combined_training.to_csv('data-extractor/stockdata_ALL_training.csv', index=False) combined_test.to_csv('data-extractor/stockdata_ALL_test.csv', index=False) dataAAPL.sort_values('date') dataAAPL.drop(columns='date',", "== date+timedelta(days=days)] days = 1 price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)] while price_minus1.empty: days", "'? ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\s+', ' ', x)) dataFrame['message'] =", "return dataFrame @classmethod def aggregate_stock_price_data(cls): \"\"\" compile stocktwits data for stock prediction analysis", "= LoadData.combine_price_and_sentiment(dataAAPL, priceAAPL) dataAMZN = LoadData.combine_price_and_sentiment(dataAMZN, priceAMZN) dataGOOGL = LoadData.combine_price_and_sentiment(dataGOOGL, priceGOOGL) dataAAPL.to_csv('data-extractor/stocktwits_AAPL_sharedata.csv', index=False)", "bearish_keywords = set() lemmatizer = WordNetLemmatizer() stop_words = set(stopwords.words('english')) for index, row in", "in updated_bearish_keywords: file.write(word+\"\\n\") @classmethod def get_stocktwits_data(cls, symbol): \"\"\" get_data loads the preprocessed data", "get_price_data(cls, symbol): \"\"\" loads the price data of 'symbol' from data-extractor and returns", "if os.path.isfile(file_location) is False: LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_complete.csv', columns=['sentiment', 'message']) elif type == 'training': file_location =", "== date-timedelta(days=days)] new_row = {} new_row['date'] = date new_row['sentiment_calculated_bullish'] = df.loc[(date, 'Bullish')]['message'] new_row['sentiment_calculated_bearish']", "timedelta \"\"\" receive sentimentFrame as (date, sentiment, message) indexed by date and sentiment", "as (sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label) \"\"\" dataFrame = pd.DataFrame() for date,", "get_stocktwits_data(cls, symbol): \"\"\" get_data loads the preprocessed data of 'symbol' from data-extractor and", "print(new_row) dataFrame = dataFrame.append(new_row, ignore_index=True) return dataFrame @classmethod def aggregate_stock_price_data(cls): \"\"\" compile stocktwits", "set() lemmatizer = WordNetLemmatizer() stop_words = set(stopwords.words('english')) for index, row in dataFrame.iterrows(): tokens", "return dataFrameBearish, dataFrameBullish @classmethod def get_training_data(cls): \"\"\" get labelled training data with equal", "= 'data-extractor/stocktwits_AAPL_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location) dataAAPL = LoadData.get_stocktwits_data('AAPL') dataAAPL['sentiment']", "for stock prediction in format (sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label) Standardize the", "to datetime in descending order (newest first) 3) remove links, @ and $", "in range(len(pos)): if 
len(wordnet.synsets(pos[i][0])): if pos[i][1].startswith('J'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'a')) elif pos[i][1].startswith('V'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'v')) elif", "pass dataFrame = LoadData.get_labelled_data(type='complete') dataFrameBearish = dataFrame[dataFrame['sentiment']=='Bearish'] dataFrameBullish = dataFrame[dataFrame['sentiment']=='Bullish'] dataFrameBearishTraining = dataFrameBearish", "take previous n days sentiment_calculated and using label of next nth day returns", "not (os.path.isfile('data-extractor/stocktwits_AAPL_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_AMZN_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_GOOGL_sharedata.csv')): from sklearn.externals import joblib file_location = 'naive_bayes_classifier.pkl'", "LoadData.combine_price_and_sentiment(dataAAPL, priceAAPL) dataAMZN = LoadData.combine_price_and_sentiment(dataAMZN, priceAMZN) dataGOOGL = LoadData.combine_price_and_sentiment(dataGOOGL, priceGOOGL) dataAAPL.to_csv('data-extractor/stocktwits_AAPL_sharedata.csv', index=False) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_sharedata.csv',", "index=False) dataAMZN.sort_values('date') dataAMZN.drop(columns='date', inplace=True) AMZN_training, AMZN_test = np.split(dataAMZN.sample(frac=1), [int(.9*len(dataAMZN))]) AMZN_training.to_csv('data-extractor/stockdata_AMZN_training.csv', index=False) AMZN_test.to_csv('data-extractor/stockdata_AMZN_test.csv', index=False)", "dataFrame['message'].apply(lambda x: re.sub(r'\\,+', ', ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\?+', '? ',", "with columns [Date(datetime64[ns]), Opening Price(float64), Closing Price(float64), Volume(float64)]. \"\"\" file_location = 'data-extractor/stock_prices_'+symbol+'.csv' dataFrame", "(sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label) \"\"\" dataFrame = pd.DataFrame() for date, df", "from data-extractor and returns a pandas dataframe with columns [message(object), datetime(datetime64[ns])]. 
\"\"\" file_location", "'data-extractor/lexicon_bullish_words.txt' if os.path.isfile(file_location1) is False or os.path.isfile(file_location2) is False: LoadData.labelled_data_lexicon_analysis() dataFrameBearish = pd.read_csv(file_location1,", "'data-extractor/stocktwits_GOOGL_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location) dataGOOGL = LoadData.get_stocktwits_data('GOOGL') dataGOOGL['sentiment'] =", "word in updated_bearish_keywords: file.write(word+\"\\n\") @classmethod def get_stocktwits_data(cls, symbol): \"\"\" get_data loads the preprocessed", "x: tweet_classifier.predict([x])[0]) dataGOOGL['datetime'] = dataGOOGL['datetime'].apply(lambda x: x.date()) dataGOOGL.rename(columns={'datetime':'date'}, inplace=True) dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', index=False) dataAAPL =", "= 'data-extractor/stockdata_'+symbol+'_'+type+'.csv' if not os.path.isfile(file_location): import numpy as np dataAAPL, dataAMZN, dataGOOGL =", "index=False) AMZN_test.to_csv('data-extractor/stockdata_AMZN_test.csv', index=False) dataGOOGL.sort_values('date') dataGOOGL.drop(columns='date', inplace=True) GOOGL_training, GOOGL_test = np.split(dataGOOGL.sample(frac=1), [int(.9*len(dataGOOGL))]) GOOGL_training.to_csv('data-extractor/stockdata_GOOGL_training.csv', index=False)", "dataAAPL['sentiment'] = dataAAPL['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataAAPL['datetime'] = dataAAPL['datetime'].apply(lambda x: x.date()) dataAAPL.rename(columns={'datetime':'date'}, inplace=True) dataAAPL.to_csv('data-extractor/stocktwits_AAPL_withsentiment.csv',", "according to datetime in descending order (newest first) 3) remove links, @ and", "pandas dataframe with columns [Date(datetime64[ns]), Opening Price(float64), Closing Price(float64), Volume(float64)]. \"\"\" file_location =", "os.path.isfile(file_location) is False: LoadData.get_training_data() elif type == 'test': file_location = 'data-extractor/labelled_data_test_preprocessed.csv' if os.path.isfile(file_location)", "= WordNetLemmatizer() stop_words = set(stopwords.words('english')) for index, row in dataFrame.iterrows(): tokens = word_tokenize(row['message'])", "= pos_tag(tokens) selected_tags = set() for i in range(len(pos)): if len(wordnet.synsets(pos[i][0])): if pos[i][1].startswith('J'):", "tweet_volume_change, cash_volume, label) \"\"\" dataFrame = pd.DataFrame() for date, df in sentimentFrame.groupby(level=0, sort=False):", "re.sub(r'(www\\.|https?://).*?(\\s|$)|@.*?(\\s|$)|\\$.*?(\\s|$)|\\d|\\%|\\\\|/|-|_', ' ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\.+', '. 
', x)) dataFrame['message']", "datetime import timedelta \"\"\" receive sentimentFrame as (date, sentiment, message) indexed by date", "new_row['date'] = date new_row['sentiment_calculated_bullish'] = df.loc[(date, 'Bullish')]['message'] new_row['sentiment_calculated_bearish'] = df.loc[(date, 'Bearish')]['message'] new_row['sentiment_actual_previous'] =", "previous n days sentiment_calculated and using label of next nth day returns dataframes", "dataAMZN['datetime'] = dataAMZN['datetime'].apply(lambda x: x.date()) dataAMZN.rename(columns={'datetime':'date'}, inplace=True) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', index=False) sentimented_file = 'data-extractor/stocktwits_GOOGL_withsentiment.csv' if", "sentimentFrame.loc[date-timedelta(days=1)] days = 1 price_plus1 = priceFrame[priceFrame['Date'] == date+timedelta(days=days)] while price_plus1.empty: days +=", "= 1 if ((price_plus1.iloc[0]['Closing Price'] - price_current.iloc[0]['Closing Price']) >= 0) else -1 print(new_row)", "of 'symbol' from data-extractor and returns a pandas dataframe with columns [message(object), datetime(datetime64[ns])].", "in dataFrame.iterrows(): tokens = word_tokenize(row['message']) pos = pos_tag(tokens) selected_tags = set() for i", "= 'data-extractor/labelled_data_training_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.get_training_data() elif type == 'test': file_location =", "parse_dates=['date'], infer_datetime_format=True) dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True) dataAAPL = dataAAPL.groupby(['date','sentiment'], sort=False).count() dataAMZN =", "dataFrame = LoadData.get_labelled_data() bullish_keywords = set() bearish_keywords = set() lemmatizer = WordNetLemmatizer() stop_words", "dataFrameBullish @classmethod def get_training_data(cls): \"\"\" get labelled training data with equal bearish and", "dataAMZN.to_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', index=False) sentimented_file = 'data-extractor/stocktwits_GOOGL_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location) dataGOOGL", "have choice to take previous n days sentiment_calculated and using label of next", "= dataAMZN['datetime'].apply(lambda x: x.date()) dataAMZN.rename(columns={'datetime':'date'}, inplace=True) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', index=False) sentimented_file = 'data-extractor/stocktwits_GOOGL_withsentiment.csv' if os.path.isfile(sentimented_file)", "preprocessed data of 'symbol' from data-extractor and returns a pandas dataframe with columns", "AAPL_test.to_csv('data-extractor/stockdata_AAPL_test.csv', index=False) dataAMZN.sort_values('date') dataAMZN.drop(columns='date', inplace=True) AMZN_training, AMZN_test = np.split(dataAMZN.sample(frac=1), [int(.9*len(dataAMZN))]) AMZN_training.to_csv('data-extractor/stockdata_AMZN_training.csv', index=False) AMZN_test.to_csv('data-extractor/stockdata_AMZN_test.csv',", "False: tweet_classifier = joblib.load(file_location) dataGOOGL = LoadData.get_stocktwits_data('GOOGL') dataGOOGL['sentiment'] = dataGOOGL['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataGOOGL['datetime']", "columns [message(object), datetime(datetime64[ns])]. 
\"\"\" file_location = 'data-extractor/stocktwits_'+symbol+'_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.preprocess_stocktwits_data('data-extractor/stocktwits_'+symbol+'.csv') dataFrame", "days = 1 price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)] while price_minus1.empty: days += 1", "AMZN, GOOGL respectively \"\"\" if not (os.path.isfile('data-extractor/stocktwits_AAPL_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_AMZN_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_GOOGL_sharedata.csv')): from sklearn.externals", "dataFrameBullish = pd.read_csv(file_location2, header=None, names=['word']) return dataFrameBearish, dataFrameBullish @classmethod def get_training_data(cls): \"\"\" get", "get_training_data(cls): \"\"\" get labelled training data with equal bearish and bullish messages \"\"\"", "columns [sentiment(object), message(object)]. \"\"\" if type == 'complete': file_location = 'data-extractor/labelled_data_complete_preprocessed.csv' if os.path.isfile(file_location)", "os.path.isfile(file_location1) is False or os.path.isfile(file_location2) is False: LoadData.labelled_data_lexicon_analysis() dataFrameBearish = pd.read_csv(file_location1, header=None, names=['word'])", "-1 print(new_row) dataFrame = dataFrame.append(new_row, ignore_index=True) return dataFrame @classmethod def aggregate_stock_price_data(cls): \"\"\" compile", "'.', digits, slashes, hyphons 4) decode html entities 5) convert everything to lower", "receive sentimentFrame as (date, sentiment, message) indexed by date and sentiment and priceFrame", "stocktwits from data-extractor and returns a pandas dataframe with columns [sentiment(object), message(object)]. \"\"\"", "as (date, sentiment, message) indexed by date and sentiment and priceFrame as (Date,", "dataAMZN = dataAMZN.groupby(['date','sentiment'], sort=False).count() dataGOOGL = dataGOOGL.groupby(['date','sentiment'], sort=False).count() dataAAPL = LoadData.combine_price_and_sentiment(dataAAPL, priceAAPL) dataAMZN", "= np.split(combined_data.sample(frac=1), [int(.9*len(combined_data))]) combined_training.to_csv('data-extractor/stockdata_ALL_training.csv', index=False) combined_test.to_csv('data-extractor/stockdata_ALL_test.csv', index=False) dataAAPL.sort_values('date') dataAAPL.drop(columns='date', inplace=True) AAPL_training, AAPL_test =", "a csv file (appending '_preprocessed' before '.csv). The preprocessing us in following ways:", "dataFrame['message'].apply(lambda x: re.sub(r'\\.+', '. ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\,+', ', ',", "price_plus1 = priceFrame[priceFrame['Date'] == date+timedelta(days=days)] days = 1 price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)]", "' ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\.+', '. ', x)) dataFrame['message'] =", "(newest first) 3) remove links, @ and $ references, extra whitespaces, extra '.',", "re.sub(r'\\,+', ', ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\?+', '? 
', x)) dataFrame['message']", "days = 1 price_plus1 = priceFrame[priceFrame['Date'] == date+timedelta(days=days)] while price_plus1.empty: days += 1", ">= 0) else -1 print(new_row) dataFrame = dataFrame.append(new_row, ignore_index=True) return dataFrame @classmethod def", "for AAPL, AMZN, GOOGL respectively \"\"\" if not (os.path.isfile('data-extractor/stocktwits_AAPL_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_AMZN_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_GOOGL_sharedata.csv')):", "pd.read_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) return dataAAPL, dataAMZN, dataGOOGL @classmethod def get_stock_prediction_data(cls, symbol='ALL', type='training'): \"\"\"", "x: re.sub(r'(www\\.|https?://).*?(\\s|$)|@.*?(\\s|$)|\\$.*?(\\s|$)|\\d|\\%|\\\\|/|-|_', ' ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\.+', '. ', x))", "parse_dates=['date'], infer_datetime_format=True) dataAAPL = dataAAPL.groupby(['date','sentiment'], sort=False).count() dataAMZN = dataAMZN.groupby(['date','sentiment'], sort=False).count() dataGOOGL = dataGOOGL.groupby(['date','sentiment'],", "combined_training.to_csv('data-extractor/stockdata_ALL_training.csv', index=False) combined_test.to_csv('data-extractor/stockdata_ALL_test.csv', index=False) dataAAPL.sort_values('date') dataAAPL.drop(columns='date', inplace=True) AAPL_training, AAPL_test = np.split(dataAAPL.sample(frac=1), [int(.9*len(dataAAPL))]) AAPL_training.to_csv('data-extractor/stockdata_AAPL_training.csv',", "intersections from the two lists before saving \"\"\" dataFrame = LoadData.get_labelled_data() bullish_keywords =", "LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_complete.csv', columns=['sentiment', 'message']) elif type == 'training': file_location = 'data-extractor/labelled_data_training_preprocessed.csv' if os.path.isfile(file_location) is", "'data-extractor/labelled_data_test_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_test.csv', columns=['sentiment', 'message']) dataFrame = pd.read_csv(file_location) return dataFrame", "pandas dataframe with columns [message(object), datetime(datetime64[ns])]. \"\"\" file_location = 'data-extractor/stocktwits_'+symbol+'_preprocessed.csv' if os.path.isfile(file_location) is", "in the following form (date, sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label) we have", "Price']) >= 0) else -1 print(new_row) dataFrame = dataFrame.append(new_row, ignore_index=True) return dataFrame @classmethod", "dataAMZN['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataAMZN['datetime'] = dataAMZN['datetime'].apply(lambda x: x.date()) dataAMZN.rename(columns={'datetime':'date'}, inplace=True) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', index=False) sentimented_file", "to lower case \"\"\" if 'datetime' in columns: dataFrame = pd.read_csv(file_location, usecols=columns, parse_dates=['datetime'],", "html import os.path import pandas as pd import re from nltk import word_tokenize,", "data with equal bearish and bullish messages \"\"\" try: os.remove('data-extractor/labelled_data_training.csv') except OSError: pass", "', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\?+', '? 
', x)) dataFrame['message'] = dataFrame['message'].apply(lambda", "stopwords, wordnet from nltk.stem.wordnet import WordNetLemmatizer class LoadData: @classmethod def preprocess_stocktwits_data(cls, file_location, columns=['datetime',", "Volume(float64)]. \"\"\" file_location = 'data-extractor/stock_prices_'+symbol+'.csv' dataFrame = pd.read_csv(file_location, usecols=['Date', 'Opening Price', 'Closing Price',", "False or os.path.isfile(file_location2) is False: LoadData.labelled_data_lexicon_analysis() dataFrameBearish = pd.read_csv(file_location1, header=None, names=['word']) dataFrameBullish =", "'n')) elif pos[i][1].startswith('R'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'r')) selected_tags -= stop_words if row['sentiment'] == 'Bullish': bullish_keywords", "\"\"\" get custom lexicon of bearish and bullish words respectively \"\"\" file_location1 =", "with equal bearish and bullish messages \"\"\" try: os.remove('data-extractor/labelled_data_training.csv') except OSError: pass dataFrame", "LoadData.aggregate_stock_price_data() combined_data = dataAAPL.append([dataAMZN, dataGOOGL], ignore_index=True) combined_data.sort_values('date') combined_data.drop(columns='date', inplace=True) combined_training, combined_test = np.split(combined_data.sample(frac=1),", "cash_volume, label) Standardize the data before using. \"\"\" file_location = 'data-extractor/stockdata_'+symbol+'_'+type+'.csv' if not", "os.path import pandas as pd import re from nltk import word_tokenize, pos_tag from", "combined_test = np.split(combined_data.sample(frac=1), [int(.9*len(combined_data))]) combined_training.to_csv('data-extractor/stockdata_ALL_training.csv', index=False) combined_test.to_csv('data-extractor/stockdata_ALL_test.csv', index=False) dataAAPL.sort_values('date') dataAAPL.drop(columns='date', inplace=True) AAPL_training, AAPL_test", "dataAMZN.drop(columns='date', inplace=True) AMZN_training, AMZN_test = np.split(dataAMZN.sample(frac=1), [int(.9*len(dataAMZN))]) AMZN_training.to_csv('data-extractor/stockdata_AMZN_training.csv', index=False) AMZN_test.to_csv('data-extractor/stockdata_AMZN_test.csv', index=False) dataGOOGL.sort_values('date') dataGOOGL.drop(columns='date',", "dataFrameTraining.to_csv('data-extractor/labelled_data_training_preprocessed.csv', index=False) @classmethod def combine_price_and_sentiment(cls, sentimentFrame, priceFrame): from datetime import timedelta \"\"\" receive", "== 'Bearish': bearish_keywords = bearish_keywords.union(selected_tags) updated_bullish_keywords = bullish_keywords - bearish_keywords updated_bearish_keywords = bearish_keywords", "sentimented_file = 'data-extractor/stocktwits_AAPL_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location) dataAAPL = LoadData.get_stocktwits_data('AAPL')", "def get_custom_lexicon(cls): \"\"\" get custom lexicon of bearish and bullish words respectively \"\"\"", "whitespaces, extra '.', digits, slashes, hyphons 4) decode html entities 5) convert everything", "dataGOOGL = LoadData.combine_price_and_sentiment(dataGOOGL, priceGOOGL) dataAAPL.to_csv('data-extractor/stocktwits_AAPL_sharedata.csv', index=False) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_sharedata.csv', index=False) dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', index=False) dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_sharedata.csv',", "in updated_bullish_keywords: file.write(word+\"\\n\") with open('data-extractor/lexicon_bearish_words.txt', 'a') as file: for word in 
updated_bearish_keywords: file.write(word+\"\\n\")", "for improved accuracy in scoring for each labelled message do 1) tokenize the", "in sentimentFrame.index: continue tweet_minus1 = sentimentFrame.loc[date-timedelta(days=1)] days = 1 price_plus1 = priceFrame[priceFrame['Date'] ==", "index=False) @classmethod def labelled_data_lexicon_analysis(cls): \"\"\" extract keywords from labelled stocktwits data for improved", "os.remove('data-extractor/labelled_data_training.csv') except OSError: pass dataFrame = LoadData.get_labelled_data(type='complete') dataFrameBearish = dataFrame[dataFrame['sentiment']=='Bearish'] dataFrameBullish = dataFrame[dataFrame['sentiment']=='Bullish']", "os.path.isfile(file_location): import numpy as np dataAAPL, dataAMZN, dataGOOGL = LoadData.aggregate_stock_price_data() combined_data = dataAAPL.append([dataAMZN,", "pos = pos_tag(tokens) selected_tags = set() for i in range(len(pos)): if len(wordnet.synsets(pos[i][0])): if", "\"\"\" loads the price data of 'symbol' from data-extractor and returns a pandas", "not in sentimentFrame.index: continue tweet_minus1 = sentimentFrame.loc[date-timedelta(days=1)] days = 1 price_plus1 = priceFrame[priceFrame['Date']", "as file: for word in updated_bearish_keywords: file.write(word+\"\\n\") @classmethod def get_stocktwits_data(cls, symbol): \"\"\" get_data", "= LoadData.get_stocktwits_data('GOOGL') dataGOOGL['sentiment'] = dataGOOGL['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataGOOGL['datetime'] = dataGOOGL['datetime'].apply(lambda x: x.date()) dataGOOGL.rename(columns={'datetime':'date'},", "extra whitespaces, extra '.', digits, slashes, hyphons 4) decode html entities 5) convert", "'Volume'], parse_dates=['Date'], infer_datetime_format=True) return dataFrame @classmethod def get_labelled_data(cls, type='complete'): \"\"\" get_labelled_data loads the", "in following ways: 1) extract message and datetime columns. 
2) sort according to", "infer_datetime_format=True) dataAAPL = dataAAPL.groupby(['date','sentiment'], sort=False).count() dataAMZN = dataAMZN.groupby(['date','sentiment'], sort=False).count() dataGOOGL = dataGOOGL.groupby(['date','sentiment'], sort=False).count()", "stop words else ignore the word remove intersections from the two lists before", "and $ references, extra whitespaces, extra '.', digits, slashes, hyphons 4) decode html", "type='complete'): \"\"\" get_labelled_data loads the preprocessed labelled data of stocktwits from data-extractor and", "if os.path.isfile(file_location) is False: LoadData.preprocess_stocktwits_data('data-extractor/stocktwits_'+symbol+'.csv') dataFrame = pd.read_csv(file_location) return dataFrame @classmethod def get_price_data(cls,", "ignore_index=True).sample(frac=1).reset_index(drop=True) dataFrameTraining.to_csv('data-extractor/labelled_data_training_preprocessed.csv', index=False) @classmethod def combine_price_and_sentiment(cls, sentimentFrame, priceFrame): from datetime import timedelta \"\"\"", "prediction analysis in the following form (date, sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label)", "'data-extractor/stocktwits_AAPL_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location) dataAAPL = LoadData.get_stocktwits_data('AAPL') dataAAPL['sentiment'] =", "before saving \"\"\" dataFrame = LoadData.get_labelled_data() bullish_keywords = set() bearish_keywords = set() lemmatizer", "@classmethod def get_stock_prediction_data(cls, symbol='ALL', type='training'): \"\"\" get the training and test data for", "date+timedelta(days=days)] while price_plus1.empty: days += 1 price_plus1 = priceFrame[priceFrame['Date'] == date+timedelta(days=days)] days =", "nth day returns dataframes for AAPL, AMZN, GOOGL respectively \"\"\" if not (os.path.isfile('data-extractor/stocktwits_AAPL_sharedata.csv')", "False: LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_test.csv', columns=['sentiment', 'message']) dataFrame = pd.read_csv(file_location) return dataFrame @classmethod def get_custom_lexicon(cls): \"\"\"", "words else ignore the word remove intersections from the two lists before saving", "3) if a sense is present in wordnet then, lemmatize the word and", "ascending=False) else: dataFrame = pd.read_csv(file_location, usecols=columns) dataFrame['message'] = dataFrame['message'].apply(lambda x: html.unescape(x)) dataFrame['message'] =", "in sentimentFrame.groupby(level=0, sort=False): price_current = priceFrame[priceFrame['Date'] == date] if price_current.empty or date-timedelta(days=1) not", "respectively \"\"\" if not (os.path.isfile('data-extractor/stocktwits_AAPL_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_AMZN_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_GOOGL_sharedata.csv')): from sklearn.externals import joblib", "combined_data.sort_values('date') combined_data.drop(columns='date', inplace=True) combined_training, combined_test = np.split(combined_data.sample(frac=1), [int(.9*len(combined_data))]) combined_training.to_csv('data-extractor/stockdata_ALL_training.csv', index=False) combined_test.to_csv('data-extractor/stockdata_ALL_test.csv', index=False) dataAAPL.sort_values('date')", "message do 1) tokenize the message 2) perform POS tagging 3) if a", "joblib.load(file_location) dataGOOGL = LoadData.get_stocktwits_data('GOOGL') dataGOOGL['sentiment'] = 
dataGOOGL['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataGOOGL['datetime'] = dataGOOGL['datetime'].apply(lambda x:", "index=False) dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) dataGOOGL =", "selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'a')) elif pos[i][1].startswith('V'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'v')) elif pos[i][1].startswith('N'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'n')) elif pos[i][1].startswith('R'): selected_tags.add(lemmatizer.lemmatize(pos[i][0],", "is False: LoadData.get_training_data() elif type == 'test': file_location = 'data-extractor/labelled_data_test_preprocessed.csv' if os.path.isfile(file_location) is", "tweet_classifier = joblib.load(file_location) dataAAPL = LoadData.get_stocktwits_data('AAPL') dataAAPL['sentiment'] = dataAAPL['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataAAPL['datetime'] =", "dataAMZN = LoadData.combine_price_and_sentiment(dataAMZN, priceAMZN) dataGOOGL = LoadData.combine_price_and_sentiment(dataGOOGL, priceGOOGL) dataAAPL.to_csv('data-extractor/stocktwits_AAPL_sharedata.csv', index=False) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_sharedata.csv', index=False) dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_sharedata.csv',", "= np.split(dataAMZN.sample(frac=1), [int(.9*len(dataAMZN))]) AMZN_training.to_csv('data-extractor/stockdata_AMZN_training.csv', index=False) AMZN_test.to_csv('data-extractor/stockdata_AMZN_test.csv', index=False) dataGOOGL.sort_values('date') dataGOOGL.drop(columns='date', inplace=True) GOOGL_training, GOOGL_test =", "of data. \"\"\" import html import os.path import pandas as pd import re", "joblib.load(file_location) dataAAPL = LoadData.get_stocktwits_data('AAPL') dataAAPL['sentiment'] = dataAAPL['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataAAPL['datetime'] = dataAAPL['datetime'].apply(lambda x:", "dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\?+', '? 
', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\s+',", "= LoadData.get_price_data('GOOGL') sentimented_file = 'data-extractor/stocktwits_AAPL_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location) dataAAPL", "def get_labelled_data(cls, type='complete'): \"\"\" get_labelled_data loads the preprocessed labelled data of stocktwits from", "= pd.read_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True) dataAAPL = dataAAPL.groupby(['date','sentiment'], sort=False).count() dataAMZN = dataAMZN.groupby(['date','sentiment'], sort=False).count() dataGOOGL", "sentimentFrame.index: continue tweet_minus1 = sentimentFrame.loc[date-timedelta(days=1)] days = 1 price_plus1 = priceFrame[priceFrame['Date'] == date+timedelta(days=days)]", "stock prediction in format (sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label) Standardize the data", "x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\,+', ', ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x:", "and test data for stock prediction in format (sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume,", "and os.path.isfile('data-extractor/stocktwits_AMZN_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_GOOGL_sharedata.csv')): from sklearn.externals import joblib file_location = 'naive_bayes_classifier.pkl' priceAAPL =", "a sense is present in wordnet then, lemmatize the word and remove stop", "with columns [sentiment(object), message(object)]. \"\"\" if type == 'complete': file_location = 'data-extractor/labelled_data_complete_preprocessed.csv' if", "(Date, Opening Price, Closing Price, Volume) and return a combined frame as (sentiment_calculated_bullish,", "x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\.+', '. 
', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x:", "priceFrame): from datetime import timedelta \"\"\" receive sentimentFrame as (date, sentiment, message) indexed", "import timedelta \"\"\" receive sentimentFrame as (date, sentiment, message) indexed by date and", ">= 0) else -1 new_row['tweet_volume_change'] = df['message'].sum() - tweet_minus1['message'].sum() new_row['cash_volume'] = price_current['Volume'].iloc[0] new_row['label']", "== 'Bullish': bullish_keywords = bullish_keywords.union(selected_tags) elif row['sentiment'] == 'Bearish': bearish_keywords = bearish_keywords.union(selected_tags) updated_bullish_keywords", "= bullish_keywords - bearish_keywords updated_bearish_keywords = bearish_keywords - bullish_keywords with open('data-extractor/lexicon_bullish_words.txt', 'a') as", "False: LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_complete.csv', columns=['sentiment', 'message']) elif type == 'training': file_location = 'data-extractor/labelled_data_training_preprocessed.csv' if os.path.isfile(file_location)", "len(wordnet.synsets(pos[i][0])): if pos[i][1].startswith('J'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'a')) elif pos[i][1].startswith('V'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'v')) elif pos[i][1].startswith('N'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'n'))", "x: re.sub(r'\\s+', ' ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: x.lower()) dataFrame.to_csv(file_location[:-4]+'_preprocessed.csv', index=False) @classmethod", "is False: LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_test.csv', columns=['sentiment', 'message']) dataFrame = pd.read_csv(file_location) return dataFrame @classmethod def get_custom_lexicon(cls):", "AMZN_training.to_csv('data-extractor/stockdata_AMZN_training.csv', index=False) AMZN_test.to_csv('data-extractor/stockdata_AMZN_test.csv', index=False) dataGOOGL.sort_values('date') dataGOOGL.drop(columns='date', inplace=True) GOOGL_training, GOOGL_test = np.split(dataGOOGL.sample(frac=1), [int(.9*len(dataGOOGL))]) GOOGL_training.to_csv('data-extractor/stockdata_GOOGL_training.csv',", "= np.split(dataAAPL.sample(frac=1), [int(.9*len(dataAAPL))]) AAPL_training.to_csv('data-extractor/stockdata_AAPL_training.csv', index=False) AAPL_test.to_csv('data-extractor/stockdata_AAPL_test.csv', index=False) dataAMZN.sort_values('date') dataAMZN.drop(columns='date', inplace=True) AMZN_training, AMZN_test =", "perform POS tagging 3) if a sense is present in wordnet then, lemmatize", "cash_volume, label) \"\"\" dataFrame = pd.DataFrame() for date, df in sentimentFrame.groupby(level=0, sort=False): price_current", "dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', index=False) dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True) dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True) dataGOOGL", "((price_plus1.iloc[0]['Closing Price'] - price_current.iloc[0]['Closing Price']) >= 0) else -1 print(new_row) dataFrame = dataFrame.append(new_row,", "dataFrameTraining = dataFrameBearishTraining.append(dataFrameBullishTraining, ignore_index=True).sample(frac=1).reset_index(drop=True) dataFrameTraining.to_csv('data-extractor/labelled_data_training_preprocessed.csv', index=False) @classmethod def combine_price_and_sentiment(cls, sentimentFrame, priceFrame): from datetime", "usecols=columns, 
parse_dates=['datetime'], infer_datetime_format=True) dataFrame.sort_values(by='datetime', ascending=False) else: dataFrame = pd.read_csv(file_location, usecols=columns) dataFrame['message'] = dataFrame['message'].apply(lambda", "new_row = {} new_row['date'] = date new_row['sentiment_calculated_bullish'] = df.loc[(date, 'Bullish')]['message'] new_row['sentiment_calculated_bearish'] = df.loc[(date,", "', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: x.lower()) dataFrame.to_csv(file_location[:-4]+'_preprocessed.csv', index=False) @classmethod def labelled_data_lexicon_analysis(cls): \"\"\"", "pos[i][1].startswith('N'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'n')) elif pos[i][1].startswith('R'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'r')) selected_tags -= stop_words if row['sentiment'] ==", "'training': file_location = 'data-extractor/labelled_data_training_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.get_training_data() elif type == 'test':", "from datetime import timedelta \"\"\" receive sentimentFrame as (date, sentiment, message) indexed by", "\"\"\" if type == 'complete': file_location = 'data-extractor/labelled_data_complete_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_complete.csv',", "re.sub(r'\\.+', '. ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\,+', ', ', x)) dataFrame['message']", "OSError: pass dataFrame = LoadData.get_labelled_data(type='complete') dataFrameBearish = dataFrame[dataFrame['sentiment']=='Bearish'] dataFrameBullish = dataFrame[dataFrame['sentiment']=='Bullish'] dataFrameBearishTraining =", "dataFrame.append(new_row, ignore_index=True) return dataFrame @classmethod def aggregate_stock_price_data(cls): \"\"\" compile stocktwits data for stock", "AAPL, AMZN, GOOGL respectively \"\"\" if not (os.path.isfile('data-extractor/stocktwits_AAPL_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_AMZN_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_GOOGL_sharedata.csv')): from", "columns [Date(datetime64[ns]), Opening Price(float64), Closing Price(float64), Volume(float64)]. \"\"\" file_location = 'data-extractor/stock_prices_'+symbol+'.csv' dataFrame =", "dataAMZN.groupby(['date','sentiment'], sort=False).count() dataGOOGL = dataGOOGL.groupby(['date','sentiment'], sort=False).count() dataAAPL = LoadData.combine_price_and_sentiment(dataAAPL, priceAAPL) dataAMZN = LoadData.combine_price_and_sentiment(dataAMZN,", "-1 new_row['tweet_volume_change'] = df['message'].sum() - tweet_minus1['message'].sum() new_row['cash_volume'] = price_current['Volume'].iloc[0] new_row['label'] = 1 if", "handle preprocessing and loading of data. 
\"\"\" import html import os.path import pandas", "new_row['sentiment_calculated_bearish'] = df.loc[(date, 'Bearish')]['message'] new_row['sentiment_actual_previous'] = 1 if ((price_minus1.iloc[0]['Closing Price'] - price_minus1.iloc[0]['Opening Price'])", "Price', 'Closing Price', 'Volume'], parse_dates=['Date'], infer_datetime_format=True) return dataFrame @classmethod def get_labelled_data(cls, type='complete'): \"\"\"", "= priceFrame[priceFrame['Date'] == date] if price_current.empty or date-timedelta(days=1) not in sentimentFrame.index: continue tweet_minus1", "sort according to datetime in descending order (newest first) 3) remove links, @", "price_plus1 = priceFrame[priceFrame['Date'] == date+timedelta(days=days)] while price_plus1.empty: days += 1 price_plus1 = priceFrame[priceFrame['Date']", "html.unescape(x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'(www\\.|https?://).*?(\\s|$)|@.*?(\\s|$)|\\$.*?(\\s|$)|\\d|\\%|\\\\|/|-|_', ' ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x:", "np.split(dataAMZN.sample(frac=1), [int(.9*len(dataAMZN))]) AMZN_training.to_csv('data-extractor/stockdata_AMZN_training.csv', index=False) AMZN_test.to_csv('data-extractor/stockdata_AMZN_test.csv', index=False) dataGOOGL.sort_values('date') dataGOOGL.drop(columns='date', inplace=True) GOOGL_training, GOOGL_test = np.split(dataGOOGL.sample(frac=1),", "tweet_classifier.predict([x])[0]) dataAMZN['datetime'] = dataAMZN['datetime'].apply(lambda x: x.date()) dataAMZN.rename(columns={'datetime':'date'}, inplace=True) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', index=False) sentimented_file = 'data-extractor/stocktwits_GOOGL_withsentiment.csv'", "preprocessed labelled data of stocktwits from data-extractor and returns a pandas dataframe with", "x: tweet_classifier.predict([x])[0]) dataAMZN['datetime'] = dataAMZN['datetime'].apply(lambda x: x.date()) dataAMZN.rename(columns={'datetime':'date'}, inplace=True) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', index=False) sentimented_file =", "= LoadData.get_price_data('AMZN') priceGOOGL = LoadData.get_price_data('GOOGL') sentimented_file = 'data-extractor/stocktwits_AAPL_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier", "aggregate_stock_price_data(cls): \"\"\" compile stocktwits data for stock prediction analysis in the following form", "index=False) dataGOOGL.sort_values('date') dataGOOGL.drop(columns='date', inplace=True) GOOGL_training, GOOGL_test = np.split(dataGOOGL.sample(frac=1), [int(.9*len(dataGOOGL))]) GOOGL_training.to_csv('data-extractor/stockdata_GOOGL_training.csv', index=False) GOOGL_test.to_csv('data-extractor/stockdata_GOOGL_test.csv', index=False)", "priceGOOGL = LoadData.get_price_data('GOOGL') sentimented_file = 'data-extractor/stocktwits_AAPL_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location)", "sense is present in wordnet then, lemmatize the word and remove stop words", "LoadData.labelled_data_lexicon_analysis() dataFrameBearish = pd.read_csv(file_location1, header=None, names=['word']) dataFrameBullish = pd.read_csv(file_location2, header=None, names=['word']) return dataFrameBearish,", "if type == 'complete': file_location = 'data-extractor/labelled_data_complete_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_complete.csv', columns=['sentiment',", "stock prediction analysis in the 
following form (date, sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume,", "= 1 price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)] while price_minus1.empty: days += 1 price_minus1", "= dataFrame['message'].apply(lambda x: re.sub(r'\\.+', '. ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\,+', ',", "Volume) and return a combined frame as (sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label)", "the data before using. \"\"\" file_location = 'data-extractor/stockdata_'+symbol+'_'+type+'.csv' if not os.path.isfile(file_location): import numpy", "pos[i][1].startswith('J'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'a')) elif pos[i][1].startswith('V'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'v')) elif pos[i][1].startswith('N'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'n')) elif pos[i][1].startswith('R'):", "file_location = 'data-extractor/stock_prices_'+symbol+'.csv' dataFrame = pd.read_csv(file_location, usecols=['Date', 'Opening Price', 'Closing Price', 'Volume'], parse_dates=['Date'],", "pd.read_csv(file_location1, header=None, names=['word']) dataFrameBullish = pd.read_csv(file_location2, header=None, names=['word']) return dataFrameBearish, dataFrameBullish @classmethod def", "is False: LoadData.labelled_data_lexicon_analysis() dataFrameBearish = pd.read_csv(file_location1, header=None, names=['word']) dataFrameBullish = pd.read_csv(file_location2, header=None, names=['word'])", "= dataFrame['message'].apply(lambda x: re.sub(r'\\s+', ' ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: x.lower()) dataFrame.to_csv(file_location[:-4]+'_preprocessed.csv',", "names=['word']) dataFrameBullish = pd.read_csv(file_location2, header=None, names=['word']) return dataFrameBearish, dataFrameBullish @classmethod def get_training_data(cls): \"\"\"", "= dataFrame['message'].apply(lambda x: x.lower()) dataFrame.to_csv(file_location[:-4]+'_preprocessed.csv', index=False) @classmethod def labelled_data_lexicon_analysis(cls): \"\"\" extract keywords from", "dataFrame = pd.read_csv(file_location) return dataFrame @classmethod def get_price_data(cls, symbol): \"\"\" loads the price", "if not (os.path.isfile('data-extractor/stocktwits_AAPL_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_AMZN_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_GOOGL_sharedata.csv')): from sklearn.externals import joblib file_location =", "row['sentiment'] == 'Bullish': bullish_keywords = bullish_keywords.union(selected_tags) elif row['sentiment'] == 'Bearish': bearish_keywords = bearish_keywords.union(selected_tags)", "LoadData.combine_price_and_sentiment(dataAMZN, priceAMZN) dataGOOGL = LoadData.combine_price_and_sentiment(dataGOOGL, priceGOOGL) dataAAPL.to_csv('data-extractor/stocktwits_AAPL_sharedata.csv', index=False) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_sharedata.csv', index=False) dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', index=False) dataAAPL", "'message']) dataFrame = pd.read_csv(file_location) return dataFrame @classmethod def get_custom_lexicon(cls): \"\"\" get custom lexicon", "ignore_index=True) combined_data.sort_values('date') combined_data.drop(columns='date', inplace=True) combined_training, combined_test = np.split(combined_data.sample(frac=1), [int(.9*len(combined_data))]) 
combined_training.to_csv('data-extractor/stockdata_ALL_training.csv', index=False) combined_test.to_csv('data-extractor/stockdata_ALL_test.csv', index=False)", "file_location = 'data-extractor/stocktwits_'+symbol+'_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.preprocess_stocktwits_data('data-extractor/stocktwits_'+symbol+'.csv') dataFrame = pd.read_csv(file_location) return dataFrame", "-= stop_words if row['sentiment'] == 'Bullish': bullish_keywords = bullish_keywords.union(selected_tags) elif row['sentiment'] == 'Bearish':", "us in following ways: 1) extract message and datetime columns. 2) sort according", "LoadData.get_labelled_data() bullish_keywords = set() bearish_keywords = set() lemmatizer = WordNetLemmatizer() stop_words = set(stopwords.words('english'))", "columns. 2) sort according to datetime in descending order (newest first) 3) remove", "dataAAPL['datetime'].apply(lambda x: x.date()) dataAAPL.rename(columns={'datetime':'date'}, inplace=True) dataAAPL.to_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', index=False) sentimented_file = 'data-extractor/stocktwits_AMZN_withsentiment.csv' if os.path.isfile(sentimented_file) is", "dataFrame = pd.DataFrame() for date, df in sentimentFrame.groupby(level=0, sort=False): price_current = priceFrame[priceFrame['Date'] ==", "'a') as file: for word in updated_bullish_keywords: file.write(word+\"\\n\") with open('data-extractor/lexicon_bearish_words.txt', 'a') as file:", "else -1 print(new_row) dataFrame = dataFrame.append(new_row, ignore_index=True) return dataFrame @classmethod def aggregate_stock_price_data(cls): \"\"\"", "dataAAPL.append([dataAMZN, dataGOOGL], ignore_index=True) combined_data.sort_values('date') combined_data.drop(columns='date', inplace=True) combined_training, combined_test = np.split(combined_data.sample(frac=1), [int(.9*len(combined_data))]) combined_training.to_csv('data-extractor/stockdata_ALL_training.csv', index=False)", "'naive_bayes_classifier.pkl' priceAAPL = LoadData.get_price_data('AAPL') priceAMZN = LoadData.get_price_data('AMZN') priceGOOGL = LoadData.get_price_data('GOOGL') sentimented_file = 'data-extractor/stocktwits_AAPL_withsentiment.csv'", "combined_test.to_csv('data-extractor/stockdata_ALL_test.csv', index=False) dataAAPL.sort_values('date') dataAAPL.drop(columns='date', inplace=True) AAPL_training, AAPL_test = np.split(dataAAPL.sample(frac=1), [int(.9*len(dataAAPL))]) AAPL_training.to_csv('data-extractor/stockdata_AAPL_training.csv', index=False) AAPL_test.to_csv('data-extractor/stockdata_AAPL_test.csv',", "WordNetLemmatizer() stop_words = set(stopwords.words('english')) for index, row in dataFrame.iterrows(): tokens = word_tokenize(row['message']) pos", "is False or os.path.isfile(file_location2) is False: LoadData.labelled_data_lexicon_analysis() dataFrameBearish = pd.read_csv(file_location1, header=None, names=['word']) dataFrameBullish", "and sentiment and priceFrame as (Date, Opening Price, Closing Price, Volume) and return", "word_tokenize(row['message']) pos = pos_tag(tokens) selected_tags = set() for i in range(len(pos)): if len(wordnet.synsets(pos[i][0])):", "for date, df in sentimentFrame.groupby(level=0, sort=False): price_current = priceFrame[priceFrame['Date'] == date] if price_current.empty", "'data-extractor/labelled_data_complete_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_complete.csv', columns=['sentiment', 'message']) elif type == 'training': file_location", "\"\"\" 
"""
... loading of data.
"""
import html
import os.path
import pandas as pd
import re
from nltk import word_tokenize, pos_tag
from nltk.corpus import stopwords, wordnet
from nltk.stem.wordnet import WordNetLemmatizer


class LoadData:
    @classmethod
    def preprocess_stocktwits_data(cls, file_location, columns=['datetime', 'message']):
        """
        Preprocess the data in file_location and save it as a csv file (appending '_preprocessed' before '.csv').
        The preprocessing works as follows:
        1) extract the message and datetime columns
        2) sort by datetime in descending order (newest first)
        3) remove links, @ and $ references, extra whitespace, extra '.', digits, percent signs, slashes, hyphens and underscores
        4) decode html entities
        5) convert everything to lower case
        """
        if 'datetime' in columns:
            dataFrame = pd.read_csv(file_location, usecols=columns, parse_dates=['datetime'], infer_datetime_format=True)
            # sort_values returns a new frame; sort in place so the newest-first ordering is actually kept
            dataFrame.sort_values(by='datetime', ascending=False, inplace=True)
        else:
            dataFrame = pd.read_csv(file_location, usecols=columns)
        dataFrame['message'] = dataFrame['message'].apply(lambda x: html.unescape(x))
        dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'(www\.|https?://).*?(\s|$)|@.*?(\s|$)|\$.*?(\s|$)|\d|\%|\\|/|-|_', ' ', x))
        dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\.+', '. ', x))
        dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\,+', ', ', x))
        dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\?+', '? ', x))
        dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\s+', ' ', x))
        dataFrame['message'] = dataFrame['message'].apply(lambda x: x.lower())
        dataFrame.to_csv(file_location[:-4] + '_preprocessed.csv', index=False)
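    # Illustrative usage sketch (added for clarity, not part of the original file). The path follows
    # the 'data-extractor/stocktwits_<symbol>.csv' pattern that get_stocktwits_data below expects for
    # the raw dump; the call writes a cleaned copy alongside it with '_preprocessed' appended:
    #
    #   LoadData.preprocess_stocktwits_data('data-extractor/stocktwits_AAPL.csv')
    #   cleaned = pd.read_csv('data-extractor/stocktwits_AAPL_preprocessed.csv')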
    @classmethod
    def labelled_data_lexicon_analysis(cls):
        """
        Extract keywords from the labelled stocktwits data for improved accuracy in scoring.
        For each labelled message:
        1) tokenize the message
        2) perform POS tagging
        3) if a sense is present in wordnet, lemmatize the word and remove stop words, else ignore the word
        Remove the intersection of the two keyword lists before saving.
        """
        dataFrame = LoadData.get_labelled_data()
        bullish_keywords = set()
        bearish_keywords = set()
        lemmatizer = WordNetLemmatizer()
        stop_words = set(stopwords.words('english'))
        for index, row in dataFrame.iterrows():
            tokens = word_tokenize(row['message'])
            pos = pos_tag(tokens)
            selected_tags = set()
            for i in range(len(pos)):
                if len(wordnet.synsets(pos[i][0])):
                    if pos[i][1].startswith('J'):
                        selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'a'))
                    elif pos[i][1].startswith('V'):
                        selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'v'))
                    elif pos[i][1].startswith('N'):
                        selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'n'))
                    elif pos[i][1].startswith('R'):
                        selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'r'))
            selected_tags -= stop_words
            if row['sentiment'] == 'Bullish':
                bullish_keywords = bullish_keywords.union(selected_tags)
            elif row['sentiment'] == 'Bearish':
                bearish_keywords = bearish_keywords.union(selected_tags)

        # keep only the words that are unique to one sentiment class
        updated_bullish_keywords = bullish_keywords - bearish_keywords
        updated_bearish_keywords = bearish_keywords - bullish_keywords
        with open('data-extractor/lexicon_bullish_words.txt', 'a') as file:
            for word in updated_bullish_keywords:
                file.write(word + "\n")
        with open('data-extractor/lexicon_bearish_words.txt', 'a') as file:
            for word in updated_bearish_keywords:
                file.write(word + "\n")
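    # Quick sketch of the POS mapping used above (an illustration, not part of the pipeline):
    # Penn Treebank tags starting with 'J', 'V', 'N', 'R' are passed to wordnet as
    # 'a' (adjective), 'v' (verb), 'n' (noun) and 'r' (adverb), so for example
    #
    #   WordNetLemmatizer().lemmatize('rallies', 'v')  ->  'rally'
    #   WordNetLemmatizer().lemmatize('shares', 'n')   ->  'share'
    #
    # which is why inflected forms in bullish and bearish messages collapse to a single keyword.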
    @classmethod
    def get_stocktwits_data(cls, symbol):
        """
        Load the preprocessed stocktwits data of 'symbol' from data-extractor and return a pandas
        dataframe with columns [message(object), datetime(datetime64[ns])].
        """
        file_location = 'data-extractor/stocktwits_' + symbol + '_preprocessed.csv'
        if os.path.isfile(file_location) is False:
            LoadData.preprocess_stocktwits_data('data-extractor/stocktwits_' + symbol + '.csv')
        dataFrame = pd.read_csv(file_location)
        return dataFrame

    @classmethod
    def get_price_data(cls, symbol):
        """
        Load the price data of 'symbol' from data-extractor and return a pandas dataframe with columns
        [Date(datetime64[ns]), Opening Price(float64), Closing Price(float64), Volume(float64)].
        """
        file_location = 'data-extractor/stock_prices_' + symbol + '.csv'
        dataFrame = pd.read_csv(file_location, usecols=['Date', 'Opening Price', 'Closing Price', 'Volume'],
                                parse_dates=['Date'], infer_datetime_format=True)
        return dataFrame

    @classmethod
    def get_labelled_data(cls, type='complete'):
        """
        Load the preprocessed labelled stocktwits data from data-extractor and return a pandas
        dataframe with columns [sentiment(object), message(object)].
        """
        if type == 'complete':
            file_location = 'data-extractor/labelled_data_complete_preprocessed.csv'
            if os.path.isfile(file_location) is False:
                LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_complete.csv', columns=['sentiment', 'message'])
        elif type == 'training':
            file_location = 'data-extractor/labelled_data_training_preprocessed.csv'
            if os.path.isfile(file_location) is False:
                LoadData.get_training_data()
        elif type == 'test':
            file_location = 'data-extractor/labelled_data_test_preprocessed.csv'
            if os.path.isfile(file_location) is False:
                LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_test.csv', columns=['sentiment', 'message'])
        dataFrame = pd.read_csv(file_location)
        return dataFrame

    @classmethod
    def get_custom_lexicon(cls):
        """
        Get the custom lexicons of bearish and bullish words respectively.
        """
        file_location1 = 'data-extractor/lexicon_bearish_words.txt'
        file_location2 = 'data-extractor/lexicon_bullish_words.txt'
        if os.path.isfile(file_location1) is False or os.path.isfile(file_location2) is False:
            LoadData.labelled_data_lexicon_analysis()
        dataFrameBearish = pd.read_csv(file_location1, header=None, names=['word'])
        dataFrameBullish = pd.read_csv(file_location2, header=None, names=['word'])
        return dataFrameBearish, dataFrameBullish

    @classmethod
    def get_training_data(cls):
        """
        Get labelled training data with an equal number of bearish and bullish messages.
        """
        try:
            os.remove('data-extractor/labelled_data_training.csv')
        except OSError:
            pass
        dataFrame = LoadData.get_labelled_data(type='complete')
        dataFrameBearish = dataFrame[dataFrame['sentiment'] == 'Bearish']
        dataFrameBullish = dataFrame[dataFrame['sentiment'] == 'Bullish']
        # balance the classes by truncating the bullish set to the bearish count, then shuffle
        dataFrameBearishTraining = dataFrameBearish
        dataFrameBullishTraining = dataFrameBullish[:len(dataFrameBearish)]
        dataFrameTraining = dataFrameBearishTraining.append(dataFrameBullishTraining, ignore_index=True).sample(frac=1).reset_index(drop=True)
        dataFrameTraining.to_csv('data-extractor/labelled_data_training_preprocessed.csv', index=False)

    @classmethod
    def combine_price_and_sentiment(cls, sentimentFrame, priceFrame):
        """
        Receive sentimentFrame as (date, sentiment, message) indexed by date and sentiment, and
        priceFrame as (Date, Opening Price, Closing Price, Volume), and return a combined frame as
        (sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous,
        tweet_volume_change, cash_volume, label).
        """
        from datetime import timedelta
        dataFrame = pd.DataFrame()
        for date, df in sentimentFrame.groupby(level=0, sort=False):
            price_current = priceFrame[priceFrame['Date'] == date]
            if price_current.empty or date - timedelta(days=1) not in sentimentFrame.index:
                continue
            tweet_minus1 = sentimentFrame.loc[date - timedelta(days=1)]
            # walk forward and backward to the nearest days that actually have price data
            days = 1
            price_plus1 = priceFrame[priceFrame['Date'] == date + timedelta(days=days)]
            while price_plus1.empty:
                days += 1
                price_plus1 = priceFrame[priceFrame['Date'] == date + timedelta(days=days)]
            days = 1
            price_minus1 = priceFrame[priceFrame['Date'] == date - timedelta(days=days)]
            while price_minus1.empty:
                days += 1
                price_minus1 = priceFrame[priceFrame['Date'] == date - timedelta(days=days)]
            new_row = {}
            new_row['date'] = date
            new_row['sentiment_calculated_bullish'] = df.loc[(date, 'Bullish')]['message']
            new_row['sentiment_calculated_bearish'] = df.loc[(date, 'Bearish')]['message']
            new_row['sentiment_actual_previous'] = 1 if ((price_minus1.iloc[0]['Closing Price'] - price_minus1.iloc[0]['Opening Price']) >= 0) else -1
            new_row['tweet_volume_change'] = df['message'].sum() - tweet_minus1['message'].sum()
            new_row['cash_volume'] = price_current['Volume'].iloc[0]
            new_row['label'] = 1 if ((price_plus1.iloc[0]['Closing Price'] - price_current.iloc[0]['Closing Price']) >= 0) else -1
            print(new_row)
            dataFrame = dataFrame.append(new_row, ignore_index=True)
        return dataFrame
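    # Shape sketch for the frames handed to combine_price_and_sentiment (an illustration based on how
    # aggregate_stock_price_data builds them below, with made-up dates and counts): sentimentFrame is
    # the result of groupby(['date', 'sentiment']).count(), so its 'message' column holds per-day
    # tweet counts, e.g.
    #
    #   date        sentiment   message
    #   2018-01-02  Bullish         143
    #   2018-01-02  Bearish          57
    #
    # and df.loc[(date, 'Bullish')]['message'] above reads the bullish count for that day.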
    @classmethod
    def aggregate_stock_price_data(cls):
        """
        Compile stocktwits data for stock prediction analysis in the following form:
        (date, sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous,
        tweet_volume_change, cash_volume, label)
        We have the choice to take the previous n days of sentiment_calculated and use the label of the next nth day.
        Returns dataframes for AAPL, AMZN and GOOGL respectively.
        """
        if not (os.path.isfile('data-extractor/stocktwits_AAPL_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_AMZN_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_GOOGL_sharedata.csv')):
            from sklearn.externals import joblib
            file_location = 'naive_bayes_classifier.pkl'
            priceAAPL = LoadData.get_price_data('AAPL')
            priceAMZN = LoadData.get_price_data('AMZN')
            priceGOOGL = LoadData.get_price_data('GOOGL')

            sentimented_file = 'data-extractor/stocktwits_AAPL_withsentiment.csv'
            if os.path.isfile(sentimented_file) is False:
                tweet_classifier = joblib.load(file_location)
                dataAAPL = LoadData.get_stocktwits_data('AAPL')
                dataAAPL['sentiment'] = dataAAPL['message'].apply(lambda x: tweet_classifier.predict([x])[0])
                dataAAPL['datetime'] = dataAAPL['datetime'].apply(lambda x: x.date())
                dataAAPL.rename(columns={'datetime': 'date'}, inplace=True)
                dataAAPL.to_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', index=False)

            sentimented_file = 'data-extractor/stocktwits_AMZN_withsentiment.csv'
            if os.path.isfile(sentimented_file) is False:
                tweet_classifier = joblib.load(file_location)
                dataAMZN = LoadData.get_stocktwits_data('AMZN')
                dataAMZN['sentiment'] = dataAMZN['message'].apply(lambda x: tweet_classifier.predict([x])[0])
                dataAMZN['datetime'] = dataAMZN['datetime'].apply(lambda x: x.date())
                dataAMZN.rename(columns={'datetime': 'date'}, inplace=True)
                dataAMZN.to_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', index=False)

            sentimented_file = 'data-extractor/stocktwits_GOOGL_withsentiment.csv'
            if os.path.isfile(sentimented_file) is False:
                tweet_classifier = joblib.load(file_location)
                dataGOOGL = LoadData.get_stocktwits_data('GOOGL')
                dataGOOGL['sentiment'] = dataGOOGL['message'].apply(lambda x: tweet_classifier.predict([x])[0])
                dataGOOGL['datetime'] = dataGOOGL['datetime'].apply(lambda x: x.date())
                dataGOOGL.rename(columns={'datetime': 'date'}, inplace=True)
                dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', index=False)

            dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True)
            dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True)
            dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True)
            dataAAPL = dataAAPL.groupby(['date', 'sentiment'], sort=False).count()
            dataAMZN = dataAMZN.groupby(['date', 'sentiment'], sort=False).count()
            dataGOOGL = dataGOOGL.groupby(['date', 'sentiment'], sort=False).count()
            dataAAPL = LoadData.combine_price_and_sentiment(dataAAPL, priceAAPL)
            dataAMZN = LoadData.combine_price_and_sentiment(dataAMZN, priceAMZN)
            dataGOOGL = LoadData.combine_price_and_sentiment(dataGOOGL, priceGOOGL)
            dataAAPL.to_csv('data-extractor/stocktwits_AAPL_sharedata.csv', index=False)
            dataAMZN.to_csv('data-extractor/stocktwits_AMZN_sharedata.csv', index=False)
            dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', index=False)

        dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True)
        dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_sharedata.csv', parse_dates=['date'], infer_datetime_format=True)
        dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True)
        return dataAAPL, dataAMZN, dataGOOGL
dataFrameBearishTraining.append(dataFrameBullishTraining, ignore_index=True).sample(frac=1).reset_index(drop=True) dataFrameTraining.to_csv('data-extractor/labelled_data_training_preprocessed.csv', index=False) @classmethod", "'. ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\,+', ', ', x)) dataFrame['message'] =", "and bullish words respectively \"\"\" file_location1 = 'data-extractor/lexicon_bearish_words.txt' file_location2 = 'data-extractor/lexicon_bullish_words.txt' if os.path.isfile(file_location1)", "inplace=True) dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', index=False) dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True) dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True)", "'a')) elif pos[i][1].startswith('V'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'v')) elif pos[i][1].startswith('N'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'n')) elif pos[i][1].startswith('R'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'r'))", "= priceFrame[priceFrame['Date'] == date+timedelta(days=days)] days = 1 price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)] while", "index=False) dataAAPL.sort_values('date') dataAAPL.drop(columns='date', inplace=True) AAPL_training, AAPL_test = np.split(dataAAPL.sample(frac=1), [int(.9*len(dataAAPL))]) AAPL_training.to_csv('data-extractor/stockdata_AAPL_training.csv', index=False) AAPL_test.to_csv('data-extractor/stockdata_AAPL_test.csv', index=False)", "tweet_classifier = joblib.load(file_location) dataGOOGL = LoadData.get_stocktwits_data('GOOGL') dataGOOGL['sentiment'] = dataGOOGL['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataGOOGL['datetime'] =", "message(object)]. \"\"\" if type == 'complete': file_location = 'data-extractor/labelled_data_complete_preprocessed.csv' if os.path.isfile(file_location) is False:", "datetime columns. 
2) sort according to datetime in descending order (newest first) 3)", "LoadData.get_price_data('AMZN') priceGOOGL = LoadData.get_price_data('GOOGL') sentimented_file = 'data-extractor/stocktwits_AAPL_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier =", "elif type == 'test': file_location = 'data-extractor/labelled_data_test_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_test.csv', columns=['sentiment',", "== date+timedelta(days=days)] while price_plus1.empty: days += 1 price_plus1 = priceFrame[priceFrame['Date'] == date+timedelta(days=days)] days", "is False: tweet_classifier = joblib.load(file_location) dataAMZN = LoadData.get_stocktwits_data('AMZN') dataAMZN['sentiment'] = dataAMZN['message'].apply(lambda x: tweet_classifier.predict([x])[0])", "dataGOOGL.groupby(['date','sentiment'], sort=False).count() dataAAPL = LoadData.combine_price_and_sentiment(dataAAPL, priceAAPL) dataAMZN = LoadData.combine_price_and_sentiment(dataAMZN, priceAMZN) dataGOOGL = LoadData.combine_price_and_sentiment(dataGOOGL,", "= LoadData.get_labelled_data() bullish_keywords = set() bearish_keywords = set() lemmatizer = WordNetLemmatizer() stop_words =", "dataFrame = pd.read_csv(file_location) return dataFrame @classmethod def get_custom_lexicon(cls): \"\"\" get custom lexicon of", "dataAAPL.to_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', index=False) sentimented_file = 'data-extractor/stocktwits_AMZN_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location) dataAMZN", "lemmatize the word and remove stop words else ignore the word remove intersections", "of bearish and bullish words respectively \"\"\" file_location1 = 'data-extractor/lexicon_bearish_words.txt' file_location2 = 'data-extractor/lexicon_bullish_words.txt'", "set() bearish_keywords = set() lemmatizer = WordNetLemmatizer() stop_words = set(stopwords.words('english')) for index, row", "Opening Price(float64), Closing Price(float64), Volume(float64)]. \"\"\" file_location = 'data-extractor/stock_prices_'+symbol+'.csv' dataFrame = pd.read_csv(file_location, usecols=['Date',", "\"\"\" file_location = 'data-extractor/stock_prices_'+symbol+'.csv' dataFrame = pd.read_csv(file_location, usecols=['Date', 'Opening Price', 'Closing Price', 'Volume'],", "data before using. \"\"\" file_location = 'data-extractor/stockdata_'+symbol+'_'+type+'.csv' if not os.path.isfile(file_location): import numpy as", "sentimented_file = 'data-extractor/stocktwits_AMZN_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location) dataAMZN = LoadData.get_stocktwits_data('AMZN')", "lower case \"\"\" if 'datetime' in columns: dataFrame = pd.read_csv(file_location, usecols=columns, parse_dates=['datetime'], infer_datetime_format=True)", "def labelled_data_lexicon_analysis(cls): \"\"\" extract keywords from labelled stocktwits data for improved accuracy in", "df['message'].sum() - tweet_minus1['message'].sum() new_row['cash_volume'] = price_current['Volume'].iloc[0] new_row['label'] = 1 if ((price_plus1.iloc[0]['Closing Price'] -", "not os.path.isfile(file_location): import numpy as np dataAAPL, dataAMZN, dataGOOGL = LoadData.aggregate_stock_price_data() combined_data =", "dataframe with columns [sentiment(object), message(object)]. 
\"\"\" if type == 'complete': file_location = 'data-extractor/labelled_data_complete_preprocessed.csv'", "file_location1 = 'data-extractor/lexicon_bearish_words.txt' file_location2 = 'data-extractor/lexicon_bullish_words.txt' if os.path.isfile(file_location1) is False or os.path.isfile(file_location2) is", "for each labelled message do 1) tokenize the message 2) perform POS tagging", "or os.path.isfile(file_location2) is False: LoadData.labelled_data_lexicon_analysis() dataFrameBearish = pd.read_csv(file_location1, header=None, names=['word']) dataFrameBullish = pd.read_csv(file_location2,", "the price data of 'symbol' from data-extractor and returns a pandas dataframe with", "import pandas as pd import re from nltk import word_tokenize, pos_tag from nltk.corpus", "the preprocessed data of 'symbol' from data-extractor and returns a pandas dataframe with", "test data for stock prediction in format (sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label)", "df.loc[(date, 'Bullish')]['message'] new_row['sentiment_calculated_bearish'] = df.loc[(date, 'Bearish')]['message'] new_row['sentiment_actual_previous'] = 1 if ((price_minus1.iloc[0]['Closing Price'] -", "dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_sharedata.csv',", "= 'data-extractor/lexicon_bearish_words.txt' file_location2 = 'data-extractor/lexicon_bullish_words.txt' if os.path.isfile(file_location1) is False or os.path.isfile(file_location2) is False:", "Closing Price, Volume) and return a combined frame as (sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change,", "if ((price_plus1.iloc[0]['Closing Price'] - price_current.iloc[0]['Closing Price']) >= 0) else -1 print(new_row) dataFrame =", "1 if ((price_minus1.iloc[0]['Closing Price'] - price_minus1.iloc[0]['Opening Price']) >= 0) else -1 new_row['tweet_volume_change'] =", "as a csv file (appending '_preprocessed' before '.csv). The preprocessing us in following", "x.date()) dataAMZN.rename(columns={'datetime':'date'}, inplace=True) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', index=False) sentimented_file = 'data-extractor/stocktwits_GOOGL_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier", "return dataAAPL, dataAMZN, dataGOOGL @classmethod def get_stock_prediction_data(cls, symbol='ALL', type='training'): \"\"\" get the training", "preprocessing us in following ways: 1) extract message and datetime columns. 
2) sort", "as np dataAAPL, dataAMZN, dataGOOGL = LoadData.aggregate_stock_price_data() combined_data = dataAAPL.append([dataAMZN, dataGOOGL], ignore_index=True) combined_data.sort_values('date')", "inplace=True) GOOGL_training, GOOGL_test = np.split(dataGOOGL.sample(frac=1), [int(.9*len(dataGOOGL))]) GOOGL_training.to_csv('data-extractor/stockdata_GOOGL_training.csv', index=False) GOOGL_test.to_csv('data-extractor/stockdata_GOOGL_test.csv', index=False) data = pd.read_csv(file_location)", "from nltk.stem.wordnet import WordNetLemmatizer class LoadData: @classmethod def preprocess_stocktwits_data(cls, file_location, columns=['datetime', 'message']): \"\"\"", "priceFrame[priceFrame['Date'] == date-timedelta(days=days)] while price_minus1.empty: days += 1 price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)]", "x: re.sub(r'\\.+', '. ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\,+', ', ', x))", "= joblib.load(file_location) dataAAPL = LoadData.get_stocktwits_data('AAPL') dataAAPL['sentiment'] = dataAAPL['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataAAPL['datetime'] = dataAAPL['datetime'].apply(lambda", "bearish_keywords.union(selected_tags) updated_bullish_keywords = bullish_keywords - bearish_keywords updated_bearish_keywords = bearish_keywords - bullish_keywords with open('data-extractor/lexicon_bullish_words.txt',", "dataframe with columns [Date(datetime64[ns]), Opening Price(float64), Closing Price(float64), Volume(float64)]. \"\"\" file_location = 'data-extractor/stock_prices_'+symbol+'.csv'", "names=['word']) return dataFrameBearish, dataFrameBullish @classmethod def get_training_data(cls): \"\"\" get labelled training data with", "dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'(www\\.|https?://).*?(\\s|$)|@.*?(\\s|$)|\\$.*?(\\s|$)|\\d|\\%|\\\\|/|-|_', ' ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\.+',", "tagging 3) if a sense is present in wordnet then, lemmatize the word", "numpy as np dataAAPL, dataAMZN, dataGOOGL = LoadData.aggregate_stock_price_data() combined_data = dataAAPL.append([dataAMZN, dataGOOGL], ignore_index=True)", "False: LoadData.labelled_data_lexicon_analysis() dataFrameBearish = pd.read_csv(file_location1, header=None, names=['word']) dataFrameBullish = pd.read_csv(file_location2, header=None, names=['word']) return", "nltk.stem.wordnet import WordNetLemmatizer class LoadData: @classmethod def preprocess_stocktwits_data(cls, file_location, columns=['datetime', 'message']): \"\"\" preprocess", "lists before saving \"\"\" dataFrame = LoadData.get_labelled_data() bullish_keywords = set() bearish_keywords = set()", "we have choice to take previous n days sentiment_calculated and using label of", "messages \"\"\" try: os.remove('data-extractor/labelled_data_training.csv') except OSError: pass dataFrame = LoadData.get_labelled_data(type='complete') dataFrameBearish = dataFrame[dataFrame['sentiment']=='Bearish']", "and returns a pandas dataframe with columns [sentiment(object), message(object)]. \"\"\" if type ==", "selected_tags -= stop_words if row['sentiment'] == 'Bullish': bullish_keywords = bullish_keywords.union(selected_tags) elif row['sentiment'] ==", "[sentiment(object), message(object)]. 
\"\"\" if type == 'complete': file_location = 'data-extractor/labelled_data_complete_preprocessed.csv' if os.path.isfile(file_location) is", "do 1) tokenize the message 2) perform POS tagging 3) if a sense", "from data-extractor and returns a pandas dataframe with columns [sentiment(object), message(object)]. \"\"\" if", "\"\"\" import html import os.path import pandas as pd import re from nltk", "@classmethod def aggregate_stock_price_data(cls): \"\"\" compile stocktwits data for stock prediction analysis in the", "pd.read_csv(file_location, usecols=['Date', 'Opening Price', 'Closing Price', 'Volume'], parse_dates=['Date'], infer_datetime_format=True) return dataFrame @classmethod def", "= dataFrame['message'].apply(lambda x: re.sub(r'(www\\.|https?://).*?(\\s|$)|@.*?(\\s|$)|\\$.*?(\\s|$)|\\d|\\%|\\\\|/|-|_', ' ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\\.+', '.", "analysis in the following form (date, sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label) we", "dataFrameBullishTraining = dataFrameBullish[:len(dataFrameBearish)] dataFrameTraining = dataFrameBearishTraining.append(dataFrameBullishTraining, ignore_index=True).sample(frac=1).reset_index(drop=True) dataFrameTraining.to_csv('data-extractor/labelled_data_training_preprocessed.csv', index=False) @classmethod def combine_price_and_sentiment(cls, sentimentFrame,", "3) remove links, @ and $ references, extra whitespaces, extra '.', digits, slashes,", "with open('data-extractor/lexicon_bearish_words.txt', 'a') as file: for word in updated_bearish_keywords: file.write(word+\"\\n\") @classmethod def get_stocktwits_data(cls,", "message 2) perform POS tagging 3) if a sense is present in wordnet", "LoadData.combine_price_and_sentiment(dataGOOGL, priceGOOGL) dataAAPL.to_csv('data-extractor/stocktwits_AAPL_sharedata.csv', index=False) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_sharedata.csv', index=False) dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', index=False) dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True)", "parse_dates=['Date'], infer_datetime_format=True) return dataFrame @classmethod def get_labelled_data(cls, type='complete'): \"\"\" get_labelled_data loads the preprocessed", "new_row['tweet_volume_change'] = df['message'].sum() - tweet_minus1['message'].sum() new_row['cash_volume'] = price_current['Volume'].iloc[0] new_row['label'] = 1 if ((price_plus1.iloc[0]['Closing", "preprocessing and loading of data. 
\"\"\" import html import os.path import pandas as", "- price_minus1.iloc[0]['Opening Price']) >= 0) else -1 new_row['tweet_volume_change'] = df['message'].sum() - tweet_minus1['message'].sum() new_row['cash_volume']", "AAPL_training, AAPL_test = np.split(dataAAPL.sample(frac=1), [int(.9*len(dataAAPL))]) AAPL_training.to_csv('data-extractor/stockdata_AAPL_training.csv', index=False) AAPL_test.to_csv('data-extractor/stockdata_AAPL_test.csv', index=False) dataAMZN.sort_values('date') dataAMZN.drop(columns='date', inplace=True) AMZN_training,", "word remove intersections from the two lists before saving \"\"\" dataFrame = LoadData.get_labelled_data()", "for i in range(len(pos)): if len(wordnet.synsets(pos[i][0])): if pos[i][1].startswith('J'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'a')) elif pos[i][1].startswith('V'): selected_tags.add(lemmatizer.lemmatize(pos[i][0],", "else -1 new_row['tweet_volume_change'] = df['message'].sum() - tweet_minus1['message'].sum() new_row['cash_volume'] = price_current['Volume'].iloc[0] new_row['label'] = 1", "pos_tag from nltk.corpus import stopwords, wordnet from nltk.stem.wordnet import WordNetLemmatizer class LoadData: @classmethod", "pd.read_csv(file_location) return dataFrame @classmethod def get_custom_lexicon(cls): \"\"\" get custom lexicon of bearish and", "= LoadData.get_stocktwits_data('AMZN') dataAMZN['sentiment'] = dataAMZN['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataAMZN['datetime'] = dataAMZN['datetime'].apply(lambda x: x.date()) dataAMZN.rename(columns={'datetime':'date'},", "each labelled message do 1) tokenize the message 2) perform POS tagging 3)", "file_location = 'data-extractor/labelled_data_training_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.get_training_data() elif type == 'test': file_location", "index=False) combined_test.to_csv('data-extractor/stockdata_ALL_test.csv', index=False) dataAAPL.sort_values('date') dataAAPL.drop(columns='date', inplace=True) AAPL_training, AAPL_test = np.split(dataAAPL.sample(frac=1), [int(.9*len(dataAAPL))]) AAPL_training.to_csv('data-extractor/stockdata_AAPL_training.csv', index=False)", "remove intersections from the two lists before saving \"\"\" dataFrame = LoadData.get_labelled_data() bullish_keywords", "compile stocktwits data for stock prediction analysis in the following form (date, sentiment_calculated_bullish,", "scoring for each labelled message do 1) tokenize the message 2) perform POS", "get_labelled_data(cls, type='complete'): \"\"\" get_labelled_data loads the preprocessed labelled data of stocktwits from data-extractor", "def get_price_data(cls, symbol): \"\"\" loads the price data of 'symbol' from data-extractor and", "as (Date, Opening Price, Closing Price, Volume) and return a combined frame as", "dataFrame['message'] = dataFrame['message'].apply(lambda x: html.unescape(x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'(www\\.|https?://).*?(\\s|$)|@.*?(\\s|$)|\\$.*?(\\s|$)|\\d|\\%|\\\\|/|-|_', ' ', x))", "Price'] - price_current.iloc[0]['Closing Price']) >= 0) else -1 print(new_row) dataFrame = dataFrame.append(new_row, ignore_index=True)", "dataGOOGL = LoadData.get_stocktwits_data('GOOGL') dataGOOGL['sentiment'] = dataGOOGL['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataGOOGL['datetime'] = dataGOOGL['datetime'].apply(lambda x: x.date())", "price data of 'symbol' from data-extractor and returns a pandas dataframe with columns", "dataAAPL = dataAAPL.groupby(['date','sentiment'], 
sort=False).count() dataAMZN = dataAMZN.groupby(['date','sentiment'], sort=False).count() dataGOOGL = dataGOOGL.groupby(['date','sentiment'], sort=False).count() dataAAPL", "AMZN_training, AMZN_test = np.split(dataAMZN.sample(frac=1), [int(.9*len(dataAMZN))]) AMZN_training.to_csv('data-extractor/stockdata_AMZN_training.csv', index=False) AMZN_test.to_csv('data-extractor/stockdata_AMZN_test.csv', index=False) dataGOOGL.sort_values('date') dataGOOGL.drop(columns='date', inplace=True) GOOGL_training,", "import numpy as np dataAAPL, dataAMZN, dataGOOGL = LoadData.aggregate_stock_price_data() combined_data = dataAAPL.append([dataAMZN, dataGOOGL],", "row in dataFrame.iterrows(): tokens = word_tokenize(row['message']) pos = pos_tag(tokens) selected_tags = set() for", "index=False) sentimented_file = 'data-extractor/stocktwits_AMZN_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location) dataAMZN =", "price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)] new_row = {} new_row['date'] = date new_row['sentiment_calculated_bullish'] =", "pd.read_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True) dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True) dataAAPL = dataAAPL.groupby(['date','sentiment'], sort=False).count() dataAMZN", "if os.path.isfile(file_location) is False: LoadData.get_training_data() elif type == 'test': file_location = 'data-extractor/labelled_data_test_preprocessed.csv' if", "dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) return dataAAPL, dataAMZN, dataGOOGL @classmethod def get_stock_prediction_data(cls, symbol='ALL',", "pd.read_csv('data-extractor/stocktwits_AAPL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True)", "from sklearn.externals import joblib file_location = 'naive_bayes_classifier.pkl' priceAAPL = LoadData.get_price_data('AAPL') priceAMZN = LoadData.get_price_data('AMZN')", "index=False) sentimented_file = 'data-extractor/stocktwits_GOOGL_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location) dataGOOGL =", "\"\"\" file_location = 'data-extractor/stockdata_'+symbol+'_'+type+'.csv' if not os.path.isfile(file_location): import numpy as np dataAAPL, dataAMZN,", "== 'test': file_location = 'data-extractor/labelled_data_test_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_test.csv', columns=['sentiment', 'message']) dataFrame", "first) 3) remove links, @ and $ references, extra whitespaces, extra '.', digits,", "or date-timedelta(days=1) not in sentimentFrame.index: continue tweet_minus1 = sentimentFrame.loc[date-timedelta(days=1)] days = 1 price_plus1", "equal bearish and bullish messages \"\"\" try: os.remove('data-extractor/labelled_data_training.csv') except OSError: pass dataFrame =", "ways: 1) extract message and datetime columns. 2) sort according to datetime in", "following ways: 1) extract message and datetime columns. 
2) sort according to datetime", "@classmethod def get_stocktwits_data(cls, symbol): \"\"\" get_data loads the preprocessed data of 'symbol' from", "priceFrame[priceFrame['Date'] == date+timedelta(days=days)] while price_plus1.empty: days += 1 price_plus1 = priceFrame[priceFrame['Date'] == date+timedelta(days=days)]", "data of 'symbol' from data-extractor and returns a pandas dataframe with columns [Date(datetime64[ns]),", "'Bearish')]['message'] new_row['sentiment_actual_previous'] = 1 if ((price_minus1.iloc[0]['Closing Price'] - price_minus1.iloc[0]['Opening Price']) >= 0) else", "= dataAAPL['datetime'].apply(lambda x: x.date()) dataAAPL.rename(columns={'datetime':'date'}, inplace=True) dataAAPL.to_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', index=False) sentimented_file = 'data-extractor/stocktwits_AMZN_withsentiment.csv' if os.path.isfile(sentimented_file)", "priceAAPL = LoadData.get_price_data('AAPL') priceAMZN = LoadData.get_price_data('AMZN') priceGOOGL = LoadData.get_price_data('GOOGL') sentimented_file = 'data-extractor/stocktwits_AAPL_withsentiment.csv' if", "if pos[i][1].startswith('J'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'a')) elif pos[i][1].startswith('V'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'v')) elif pos[i][1].startswith('N'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'n')) elif", "inplace=True) dataAAPL.to_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', index=False) sentimented_file = 'data-extractor/stocktwits_AMZN_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location)", "= pd.read_csv(file_location, usecols=columns, parse_dates=['datetime'], infer_datetime_format=True) dataFrame.sort_values(by='datetime', ascending=False) else: dataFrame = pd.read_csv(file_location, usecols=columns) dataFrame['message']", "the word remove intersections from the two lists before saving \"\"\" dataFrame =", "== 'training': file_location = 'data-extractor/labelled_data_training_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.get_training_data() elif type ==", "try: os.remove('data-extractor/labelled_data_training.csv') except OSError: pass dataFrame = LoadData.get_labelled_data(type='complete') dataFrameBearish = dataFrame[dataFrame['sentiment']=='Bearish'] dataFrameBullish =", "= 'data-extractor/lexicon_bullish_words.txt' if os.path.isfile(file_location1) is False or os.path.isfile(file_location2) is False: LoadData.labelled_data_lexicon_analysis() dataFrameBearish =", "x: html.unescape(x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'(www\\.|https?://).*?(\\s|$)|@.*?(\\s|$)|\\$.*?(\\s|$)|\\d|\\%|\\\\|/|-|_', ' ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda", "import os.path import pandas as pd import re from nltk import word_tokenize, pos_tag", "dataAMZN['sentiment'] = dataAMZN['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataAMZN['datetime'] = dataAMZN['datetime'].apply(lambda x: x.date()) dataAMZN.rename(columns={'datetime':'date'}, inplace=True) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_withsentiment.csv',", "'message']): \"\"\" preprocess the data in file location and saves it as a", "if os.path.isfile(file_location1) is False or os.path.isfile(file_location2) is False: LoadData.labelled_data_lexicon_analysis() dataFrameBearish = pd.read_csv(file_location1, header=None,", "date-timedelta(days=1) not in sentimentFrame.index: continue tweet_minus1 = sentimentFrame.loc[date-timedelta(days=1)] 
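The lexicon routine above boils each labelled message down to lemmatized content words before the bullish and bearish keyword sets are built. Below is a minimal sketch of that reduction applied to a single made-up message; it assumes the NLTK corpora the module relies on (punkt, averaged_perceptron_tagger, wordnet, stopwords) are already downloaded, and the printed lemmas are only indicative.

from nltk import word_tokenize, pos_tag
from nltk.corpus import stopwords, wordnet
from nltk.stem.wordnet import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english'))

message = "buying the dip, earnings looking stronger than expected"  # hypothetical example message
selected = set()
for word, tag in pos_tag(word_tokenize(message)):
    if wordnet.synsets(word):  # keep only words WordNet knows a sense for
        wn_pos = {'J': 'a', 'V': 'v', 'N': 'n', 'R': 'r'}.get(tag[0])
        if wn_pos:
            selected.add(lemmatizer.lemmatize(word, wn_pos))
selected -= stop_words
print(selected)  # e.g. lemmas along the lines of {'buy', 'dip', 'earnings', 'look', 'strong', 'expect'}

The remaining loaders and the price/sentiment aggregation helpers of the module follow.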
    @classmethod
    def get_stocktwits_data(cls, symbol):
        """
        Load the preprocessed stocktwits data of 'symbol' from data-extractor and return a pandas dataframe
        with columns [message(object), datetime(datetime64[ns])].
        """
        file_location = 'data-extractor/stocktwits_' + symbol + '_preprocessed.csv'
        if os.path.isfile(file_location) is False:
            LoadData.preprocess_stocktwits_data('data-extractor/stocktwits_' + symbol + '.csv')
        dataFrame = pd.read_csv(file_location)
        return dataFrame

    @classmethod
    def get_price_data(cls, symbol):
        """
        Load the price data of 'symbol' from data-extractor and return a pandas dataframe with columns
        [Date(datetime64[ns]), Opening Price(float64), Closing Price(float64), Volume(float64)].
        """
        file_location = 'data-extractor/stock_prices_' + symbol + '.csv'
        dataFrame = pd.read_csv(file_location, usecols=['Date', 'Opening Price', 'Closing Price', 'Volume'],
                                parse_dates=['Date'], infer_datetime_format=True)
        return dataFrame

    @classmethod
    def get_labelled_data(cls, type='complete'):
        """
        Load the preprocessed labelled stocktwits data from data-extractor and return a pandas dataframe
        with columns [sentiment(object), message(object)].
        """
        if type == 'complete':
            file_location = 'data-extractor/labelled_data_complete_preprocessed.csv'
            if os.path.isfile(file_location) is False:
                LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_complete.csv', columns=['sentiment', 'message'])
        elif type == 'training':
            file_location = 'data-extractor/labelled_data_training_preprocessed.csv'
            if os.path.isfile(file_location) is False:
                LoadData.get_training_data()
        elif type == 'test':
            file_location = 'data-extractor/labelled_data_test_preprocessed.csv'
            if os.path.isfile(file_location) is False:
                LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_test.csv', columns=['sentiment', 'message'])
        dataFrame = pd.read_csv(file_location)
        return dataFrame

    @classmethod
    def get_custom_lexicon(cls):
        """
        Get the custom lexicons of bearish and bullish words respectively.
        """
        file_location1 = 'data-extractor/lexicon_bearish_words.txt'
        file_location2 = 'data-extractor/lexicon_bullish_words.txt'
        if os.path.isfile(file_location1) is False or os.path.isfile(file_location2) is False:
            LoadData.labelled_data_lexicon_analysis()
        dataFrameBearish = pd.read_csv(file_location1, header=None, names=['word'])
        dataFrameBullish = pd.read_csv(file_location2, header=None, names=['word'])
        return dataFrameBearish, dataFrameBullish

    @classmethod
    def get_training_data(cls):
        """
        Get labelled training data with an equal number of bearish and bullish messages.
        """
        try:
            os.remove('data-extractor/labelled_data_training.csv')
        except OSError:
            pass
        dataFrame = LoadData.get_labelled_data(type='complete')
        dataFrameBearish = dataFrame[dataFrame['sentiment'] == 'Bearish']
        dataFrameBullish = dataFrame[dataFrame['sentiment'] == 'Bullish']
        dataFrameBearishTraining = dataFrameBearish
        dataFrameBullishTraining = dataFrameBullish[:len(dataFrameBearish)]
        dataFrameTraining = dataFrameBearishTraining.append(dataFrameBullishTraining, ignore_index=True).sample(frac=1).reset_index(drop=True)
        dataFrameTraining.to_csv('data-extractor/labelled_data_training_preprocessed.csv', index=False)

    @classmethod
    def combine_price_and_sentiment(cls, sentimentFrame, priceFrame):
        """
        Take sentimentFrame as (date, sentiment, message) indexed by date and sentiment, and priceFrame as
        (Date, Opening Price, Closing Price, Volume), and return a combined frame as (sentiment_calculated_bullish,
        sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label).
        """
        from datetime import timedelta
        dataFrame = pd.DataFrame()
        for date, df in sentimentFrame.groupby(level=0, sort=False):
            price_current = priceFrame[priceFrame['Date'] == date]
            if price_current.empty or date - timedelta(days=1) not in sentimentFrame.index:
                continue
            tweet_minus1 = sentimentFrame.loc[date - timedelta(days=1)]

            # walk forward and backward to the nearest days for which price data exists
            days = 1
            price_plus1 = priceFrame[priceFrame['Date'] == date + timedelta(days=days)]
            while price_plus1.empty:
                days += 1
                price_plus1 = priceFrame[priceFrame['Date'] == date + timedelta(days=days)]
            days = 1
            price_minus1 = priceFrame[priceFrame['Date'] == date - timedelta(days=days)]
            while price_minus1.empty:
                days += 1
                price_minus1 = priceFrame[priceFrame['Date'] == date - timedelta(days=days)]

            new_row = {}
            new_row['date'] = date
            new_row['sentiment_calculated_bullish'] = df.loc[(date, 'Bullish')]['message']
            new_row['sentiment_calculated_bearish'] = df.loc[(date, 'Bearish')]['message']
            new_row['sentiment_actual_previous'] = 1 if ((price_minus1.iloc[0]['Closing Price'] - price_minus1.iloc[0]['Opening Price']) >= 0) else -1
            new_row['tweet_volume_change'] = df['message'].sum() - tweet_minus1['message'].sum()
            new_row['cash_volume'] = price_current['Volume'].iloc[0]
            new_row['label'] = 1 if ((price_plus1.iloc[0]['Closing Price'] - price_current.iloc[0]['Closing Price']) >= 0) else -1
            print(new_row)
            dataFrame = dataFrame.append(new_row, ignore_index=True)
        return dataFrame

    @classmethod
    def aggregate_stock_price_data(cls):
        """
        Compile stocktwits data for stock prediction analysis in the following form:
        (date, sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label).
        We have the choice to take the previous n days' sentiment_calculated and use the label of a later day.
        Returns dataframes for AAPL, AMZN, GOOGL respectively.
        """
        if not (os.path.isfile('data-extractor/stocktwits_AAPL_sharedata.csv')
                and os.path.isfile('data-extractor/stocktwits_AMZN_sharedata.csv')
                and os.path.isfile('data-extractor/stocktwits_GOOGL_sharedata.csv')):
            from sklearn.externals import joblib
            file_location = 'naive_bayes_classifier.pkl'
            priceAAPL = LoadData.get_price_data('AAPL')
            priceAMZN = LoadData.get_price_data('AMZN')
            priceGOOGL = LoadData.get_price_data('GOOGL')

            sentimented_file = 'data-extractor/stocktwits_AAPL_withsentiment.csv'
            if os.path.isfile(sentimented_file) is False:
                tweet_classifier = joblib.load(file_location)
                dataAAPL = LoadData.get_stocktwits_data('AAPL')
                dataAAPL['sentiment'] = dataAAPL['message'].apply(lambda x: tweet_classifier.predict([x])[0])
                dataAAPL['datetime'] = dataAAPL['datetime'].apply(lambda x: x.date())
                dataAAPL.rename(columns={'datetime': 'date'}, inplace=True)
                dataAAPL.to_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', index=False)

            sentimented_file = 'data-extractor/stocktwits_AMZN_withsentiment.csv'
            if os.path.isfile(sentimented_file) is False:
                tweet_classifier = joblib.load(file_location)
                dataAMZN = LoadData.get_stocktwits_data('AMZN')
                dataAMZN['sentiment'] = dataAMZN['message'].apply(lambda x: tweet_classifier.predict([x])[0])
                dataAMZN['datetime'] = dataAMZN['datetime'].apply(lambda x: x.date())
                dataAMZN.rename(columns={'datetime': 'date'}, inplace=True)
                dataAMZN.to_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', index=False)

            sentimented_file = 'data-extractor/stocktwits_GOOGL_withsentiment.csv'
            if os.path.isfile(sentimented_file) is False:
                tweet_classifier = joblib.load(file_location)
                dataGOOGL = LoadData.get_stocktwits_data('GOOGL')
                dataGOOGL['sentiment'] = dataGOOGL['message'].apply(lambda x: tweet_classifier.predict([x])[0])
                dataGOOGL['datetime'] = dataGOOGL['datetime'].apply(lambda x: x.date())
                dataGOOGL.rename(columns={'datetime': 'date'}, inplace=True)
                dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', index=False)

            dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True)
            dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True)
            dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True)

            dataAAPL = dataAAPL.groupby(['date', 'sentiment'], sort=False).count()
            dataAMZN = dataAMZN.groupby(['date', 'sentiment'], sort=False).count()
            dataGOOGL = dataGOOGL.groupby(['date', 'sentiment'], sort=False).count()

            dataAAPL = LoadData.combine_price_and_sentiment(dataAAPL, priceAAPL)
            dataAMZN = LoadData.combine_price_and_sentiment(dataAMZN, priceAMZN)
            dataGOOGL = LoadData.combine_price_and_sentiment(dataGOOGL, priceGOOGL)

            dataAAPL.to_csv('data-extractor/stocktwits_AAPL_sharedata.csv', index=False)
            dataAMZN.to_csv('data-extractor/stocktwits_AMZN_sharedata.csv', index=False)
            dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', index=False)

        dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True)
        dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_sharedata.csv', parse_dates=['date'], infer_datetime_format=True)
        dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True)
        return dataAAPL, dataAMZN, dataGOOGL

    @classmethod
    def get_stock_prediction_data(cls, symbol='ALL', type='training'):
        """
        Get the training and test data for stock prediction in the format
        (sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label).
        Standardize the data before using it.
        """
        file_location = 'data-extractor/stockdata_' + symbol + '_' + type + '.csv'
        if not os.path.isfile(file_location):
            import numpy as np
            dataAAPL, dataAMZN, dataGOOGL = LoadData.aggregate_stock_price_data()

            combined_data = dataAAPL.append([dataAMZN, dataGOOGL], ignore_index=True)
            combined_data.sort_values('date')
            combined_data.drop(columns='date', inplace=True)
            combined_training, combined_test = np.split(combined_data.sample(frac=1), [int(.9 * len(combined_data))])
            combined_training.to_csv('data-extractor/stockdata_ALL_training.csv', index=False)
            combined_test.to_csv('data-extractor/stockdata_ALL_test.csv', index=False)

            dataAAPL.sort_values('date')
            dataAAPL.drop(columns='date', inplace=True)
            AAPL_training, AAPL_test = np.split(dataAAPL.sample(frac=1), [int(.9 * len(dataAAPL))])
            AAPL_training.to_csv('data-extractor/stockdata_AAPL_training.csv', index=False)
            AAPL_test.to_csv('data-extractor/stockdata_AAPL_test.csv', index=False)

            dataAMZN.sort_values('date')
            dataAMZN.drop(columns='date', inplace=True)
            AMZN_training, AMZN_test = np.split(dataAMZN.sample(frac=1), [int(.9 * len(dataAMZN))])
            AMZN_training.to_csv('data-extractor/stockdata_AMZN_training.csv', index=False)
            AMZN_test.to_csv('data-extractor/stockdata_AMZN_test.csv', index=False)

            dataGOOGL.sort_values('date')
            dataGOOGL.drop(columns='date', inplace=True)
            GOOGL_training, GOOGL_test = np.split(dataGOOGL.sample(frac=1), [int(.9 * len(dataGOOGL))])
            GOOGL_training.to_csv('data-extractor/stockdata_GOOGL_training.csv', index=False)
            GOOGL_test.to_csv('data-extractor/stockdata_GOOGL_test.csv', index=False)

        data = pd.read_csv(file_location)
        return data
[ "TEST_LOGGER = getLogger(__name__) def count_features(json_profile_filename): if not os.path.isfile(json_profile_filename): raise ValueError(f\"{json_profile_filename} is not a", "# Log a smaller dataframe to get more features before rotation sleep(sleep_interval) profiler.disable()", "Load the full lending club 1000 csv, to get a chance at hitting", "0, f\"feature counts are all empty, we expect some empty files with aggressive", "session_config = SessionConfig(\"project\", \"pipeline\", writers=[writer_config]) session = session_from_config(session_config) TEST_LOGGER.info(f\"Running rotate log test with", "larger dataframe to increase chance of rotation before seeing all columns sleep(sleep_interval) ylog.log_dataframe(full_df.head(n=2))", "from shutil import rmtree from time import sleep from typing import List import", "some empty files with aggressive log rotation but not all empty!\" TEST_LOGGER.info(f\"Feature counts", "writers=[writer_config]) session = session_from_config(session_config) TEST_LOGGER.info(f\"Running rotate log test with {log_rotation_interval} flush intervals and", "file with features was {feature_counts[0]}\") TEST_LOGGER.debug(f\"There were {len(feature_counts)} files with features.\") assert_all_elements_equal(feature_counts) rmtree(test_path,", "Log a larger dataframe to increase chance of rotation before seeing all columns", "in file_names: full_file_path = os.path.join(root, file) output_files += [full_file_path] assert len(output_files) > 0,", "sleep from typing import List import pandas as pd import pytest from whylogs.app.config", "WriterConfig from whylogs.app.session import session_from_config script_dir = os.path.dirname(os.path.realpath(__file__)) TEST_LOGGER = getLogger(__name__) def count_features(json_profile_filename):", "import sleep from typing import List import pandas as pd import pytest from", "if not os.path.isfile(json_profile_filename): raise ValueError(f\"{json_profile_filename} is not a json file but trying to", "data[0] for element in iter(data): assert first[0] == element[0], f\"Found differing feature counts:", "= get_json_profile(json_profile_filename) if profile and profile.get(\"columns\"): return len(profile[\"columns\"].keys()) return 0 def get_json_profile(json_profile_filename): profile", "filename)) assert len(feature_counts) > 0, f\"feature counts are all empty, we expect some", "in os.walk(test_path): if not file_names: continue if subdir: for directory in subdir: for", "= [] for root, subdir, file_names in os.walk(test_path): if not file_names: continue if", "counts: {first[0]} vs {element[0]} in files {first[1]} and {element[1]}\" @pytest.mark.load def test_log_rotation_concurrency(tmpdir): log_rotation_interval", "bug. 
csv_path = os.path.join(script_dir, \"lending_club_1000.csv\") full_df = pd.read_csv(csv_path) # full_df has shape (1000,", "len(feature_counts) > 0, f\"feature counts are all empty, we expect some empty files", "pause\") profiler = cProfile.Profile() profiler.enable() with session.logger(tags={\"datasetId\": \"model-1\"}, with_rotation_time=log_rotation_interval) as ylog: ylog.log_dataframe(full_df) #", "import List import pandas as pd import pytest from whylogs.app.config import SessionConfig, WriterConfig", "session session_config = SessionConfig(\"project\", \"pipeline\", writers=[writer_config]) session = session_from_config(session_config) TEST_LOGGER.info(f\"Running rotate log test", "sleep(sleep_interval) ylog.log_dataframe(full_df.head(n=2)) # Log a smaller dataframe to get more features before rotation", "file but trying to open it to count features\") profile = get_json_profile(json_profile_filename) if", "with session.logger(tags={\"datasetId\": \"model-1\"}, with_rotation_time=log_rotation_interval) as ylog: ylog.log_dataframe(full_df) # Log a larger dataframe to", "get_json_profile(json_profile_filename): profile = {} if os.path.exists(json_profile_filename) and os.stat(json_profile_filename).st_size > 0: with open(json_profile_filename) as", "all empty, we expect some empty files with aggressive log rotation but not", "filename in output_files: feature_count = count_features(filename) if feature_count > 0: feature_counts.append((count_features(filename), filename)) assert", "times for _ in range(2): full_df = full_df.append(full_df) TEST_LOGGER.info(f\"test dataframe has shape {full_df.shape}\")", "features before rotation sleep(sleep_interval) profiler.disable() stats = pstats.Stats(profiler).sort_stats(\"cumulative\") TEST_LOGGER.info(stats.print_stats(10)) output_files = [] for", "def assert_all_elements_equal(data: List): if not data or len(data) == 1: return True first", "profiler.disable() stats = pstats.Stats(profiler).sort_stats(\"cumulative\") TEST_LOGGER.info(stats.print_stats(10)) output_files = [] for root, subdir, file_names in", "get_json_profile(json_profile_filename) if profile and profile.get(\"columns\"): return len(profile[\"columns\"].keys()) return 0 def get_json_profile(json_profile_filename): profile =", "filename_template=\"dataset_summary-$dataset_timestamp\") # Load the full lending club 1000 csv, to get a chance", "and {element[1]}\" @pytest.mark.load def test_log_rotation_concurrency(tmpdir): log_rotation_interval = \"1s\" sleep_interval = 2 test_path =", "were {len(feature_counts)} files with features.\") assert_all_elements_equal(feature_counts) rmtree(test_path, ignore_errors=True) TEST_LOGGER.debug(f\"End cleaning up test directory", "f\"feature counts are all empty, we expect some empty files with aggressive log", "stress test\" TEST_LOGGER.debug(f\"Generated {len(output_files)} dataset summary files.\") feature_counts = [] for filename in", "if os.path.exists(json_profile_filename) and os.stat(json_profile_filename).st_size > 0: with open(json_profile_filename) as profile_file: profile = json.load(profile_file)", "feature counts: {first[0]} vs {element[0]} in files {first[1]} and {element[1]}\" @pytest.mark.load def test_log_rotation_concurrency(tmpdir):", "intervals and {sleep_interval}s pause\") profiler = cProfile.Profile() profiler.enable() with session.logger(tags={\"datasetId\": \"model-1\"}, with_rotation_time=log_rotation_interval) as", "chance of rotation before seeing all columns sleep(sleep_interval) 
ylog.log_dataframe(full_df.head(n=2)) # Log a smaller", "{len(output_files)} dataset summary files.\") feature_counts = [] for filename in output_files: feature_count =", "{log_rotation_interval} flush intervals and {sleep_interval}s pause\") profiler = cProfile.Profile() profiler.enable() with session.logger(tags={\"datasetId\": \"model-1\"},", "os import pstats from logging import getLogger from shutil import rmtree from time", "in output_files: feature_count = count_features(filename) if feature_count > 0: feature_counts.append((count_features(filename), filename)) assert len(feature_counts)", "directory in subdir: for file in file_names: full_file_path = os.path.join(root, directory, file) output_files", "import session_from_config script_dir = os.path.dirname(os.path.realpath(__file__)) TEST_LOGGER = getLogger(__name__) def count_features(json_profile_filename): if not os.path.isfile(json_profile_filename):", "= SessionConfig(\"project\", \"pipeline\", writers=[writer_config]) session = session_from_config(session_config) TEST_LOGGER.info(f\"Running rotate log test with {log_rotation_interval}", "profile = {} if os.path.exists(json_profile_filename) and os.stat(json_profile_filename).st_size > 0: with open(json_profile_filename) as profile_file:", "dataset summary files.\") feature_counts = [] for filename in output_files: feature_count = count_features(filename)", "empty files with aggressive log rotation but not all empty!\" TEST_LOGGER.info(f\"Feature counts all", "appending to self 2 times for _ in range(2): full_df = full_df.append(full_df) TEST_LOGGER.info(f\"test", "generated during stress test\" TEST_LOGGER.debug(f\"Generated {len(output_files)} dataset summary files.\") feature_counts = [] for", "if feature_count > 0: feature_counts.append((count_features(filename), filename)) assert len(feature_counts) > 0, f\"feature counts are", "len(profile[\"columns\"].keys()) return 0 def get_json_profile(json_profile_filename): profile = {} if os.path.exists(json_profile_filename) and os.stat(json_profile_filename).st_size >", "not data or len(data) == 1: return True first = data[0] for element", "4x size by iteratively appending to self 2 times for _ in range(2):", "features was {feature_counts[0]}\") TEST_LOGGER.debug(f\"There were {len(feature_counts)} files with features.\") assert_all_elements_equal(feature_counts) rmtree(test_path, ignore_errors=True) TEST_LOGGER.debug(f\"End", "are all empty, we expect some empty files with aggressive log rotation but", "test\" TEST_LOGGER.debug(f\"Generated {len(output_files)} dataset summary files.\") feature_counts = [] for filename in output_files:", "files were generated during stress test\" TEST_LOGGER.debug(f\"Generated {len(output_files)} dataset summary files.\") feature_counts =", "features\") profile = get_json_profile(json_profile_filename) if profile and profile.get(\"columns\"): return len(profile[\"columns\"].keys()) return 0 def", "data or len(data) == 1: return True first = data[0] for element in", "shape (1000, 151) so create a test df with 4x size by iteratively", "profile = get_json_profile(json_profile_filename) if profile and profile.get(\"columns\"): return len(profile[\"columns\"].keys()) return 0 def get_json_profile(json_profile_filename):", "file in file_names: full_file_path = os.path.join(root, file) output_files += [full_file_path] assert len(output_files) >", "TEST_LOGGER.debug(f\"There were {len(feature_counts)} files with features.\") assert_all_elements_equal(feature_counts) rmtree(test_path, ignore_errors=True) 
TEST_LOGGER.debug(f\"End cleaning up test", "os.path.join(root, directory, file) output_files += [full_file_path] else: for file in file_names: full_file_path =", "assert_all_elements_equal(data: List): if not data or len(data) == 1: return True first =", "in range(2): full_df = full_df.append(full_df) TEST_LOGGER.info(f\"test dataframe has shape {full_df.shape}\") # Create a", "# full_df has shape (1000, 151) so create a test df with 4x", "files with aggressive log rotation but not all empty!\" TEST_LOGGER.info(f\"Feature counts all same,", "from time import sleep from typing import List import pandas as pd import", "to count features\") profile = get_json_profile(json_profile_filename) if profile and profile.get(\"columns\"): return len(profile[\"columns\"].keys()) return", "full_file_path = os.path.join(root, directory, file) output_files += [full_file_path] else: for file in file_names:", "pytest from whylogs.app.config import SessionConfig, WriterConfig from whylogs.app.session import session_from_config script_dir = os.path.dirname(os.path.realpath(__file__))", "trying to open it to count features\") profile = get_json_profile(json_profile_filename) if profile and", "and {sleep_interval}s pause\") profiler = cProfile.Profile() profiler.enable() with session.logger(tags={\"datasetId\": \"model-1\"}, with_rotation_time=log_rotation_interval) as ylog:", "0, \"No output files were generated during stress test\" TEST_LOGGER.debug(f\"Generated {len(output_files)} dataset summary", "file_names: continue if subdir: for directory in subdir: for file in file_names: full_file_path", "differing feature counts: {first[0]} vs {element[0]} in files {first[1]} and {element[1]}\" @pytest.mark.load def", "df with 4x size by iteratively appending to self 2 times for _", "json file but trying to open it to count features\") profile = get_json_profile(json_profile_filename)", "output_files: feature_count = count_features(filename) if feature_count > 0: feature_counts.append((count_features(filename), filename)) assert len(feature_counts) >", "SessionConfig, WriterConfig from whylogs.app.session import session_from_config script_dir = os.path.dirname(os.path.realpath(__file__)) TEST_LOGGER = getLogger(__name__) def", "= os.path.dirname(os.path.realpath(__file__)) TEST_LOGGER = getLogger(__name__) def count_features(json_profile_filename): if not os.path.isfile(json_profile_filename): raise ValueError(f\"{json_profile_filename} is", "script_dir = os.path.dirname(os.path.realpath(__file__)) TEST_LOGGER = getLogger(__name__) def count_features(json_profile_filename): if not os.path.isfile(json_profile_filename): raise ValueError(f\"{json_profile_filename}", "if not data or len(data) == 1: return True first = data[0] for", "if not file_names: continue if subdir: for directory in subdir: for file in", "ylog: ylog.log_dataframe(full_df) # Log a larger dataframe to increase chance of rotation before", "\"1s\" sleep_interval = 2 test_path = tmpdir.mkdir(\"log_rotation_concurrency_repro\") writer_config = WriterConfig(\"local\", [\"json\"], test_path.realpath(), filename_template=\"dataset_summary-$dataset_timestamp\")", "# Create a whylogs logging session session_config = SessionConfig(\"project\", \"pipeline\", writers=[writer_config]) session =", "as ylog: ylog.log_dataframe(full_df) # Log a larger dataframe to increase chance of rotation", "return profile def assert_all_elements_equal(data: List): if not data or len(data) == 1: return", "= count_features(filename) if feature_count > 0: 
feature_counts.append((count_features(filename), filename)) assert len(feature_counts) > 0, f\"feature", "we expect some empty files with aggressive log rotation but not all empty!\"", "= pd.read_csv(csv_path) # full_df has shape (1000, 151) so create a test df", "to get a chance at hitting the bug. csv_path = os.path.join(script_dir, \"lending_club_1000.csv\") full_df", "= data[0] for element in iter(data): assert first[0] == element[0], f\"Found differing feature", "[\"json\"], test_path.realpath(), filename_template=\"dataset_summary-$dataset_timestamp\") # Load the full lending club 1000 csv, to get", "as pd import pytest from whylogs.app.config import SessionConfig, WriterConfig from whylogs.app.session import session_from_config", "TEST_LOGGER.info(f\"test dataframe has shape {full_df.shape}\") # Create a whylogs logging session session_config =", "= json.load(profile_file) return profile def assert_all_elements_equal(data: List): if not data or len(data) ==", "dataframe to increase chance of rotation before seeing all columns sleep(sleep_interval) ylog.log_dataframe(full_df.head(n=2)) #", "not file_names: continue if subdir: for directory in subdir: for file in file_names:", "from whylogs.app.session import session_from_config script_dir = os.path.dirname(os.path.realpath(__file__)) TEST_LOGGER = getLogger(__name__) def count_features(json_profile_filename): if", "time import sleep from typing import List import pandas as pd import pytest", "0: feature_counts.append((count_features(filename), filename)) assert len(feature_counts) > 0, f\"feature counts are all empty, we", "= [] for filename in output_files: feature_count = count_features(filename) if feature_count > 0:", "TEST_LOGGER.info(stats.print_stats(10)) output_files = [] for root, subdir, file_names in os.walk(test_path): if not file_names:", "WriterConfig(\"local\", [\"json\"], test_path.realpath(), filename_template=\"dataset_summary-$dataset_timestamp\") # Load the full lending club 1000 csv, to", "rotation sleep(sleep_interval) profiler.disable() stats = pstats.Stats(profiler).sort_stats(\"cumulative\") TEST_LOGGER.info(stats.print_stats(10)) output_files = [] for root, subdir,", "a test df with 4x size by iteratively appending to self 2 times", "def test_log_rotation_concurrency(tmpdir): log_rotation_interval = \"1s\" sleep_interval = 2 test_path = tmpdir.mkdir(\"log_rotation_concurrency_repro\") writer_config =", "feature_count > 0: feature_counts.append((count_features(filename), filename)) assert len(feature_counts) > 0, f\"feature counts are all", "same, first file with features was {feature_counts[0]}\") TEST_LOGGER.debug(f\"There were {len(feature_counts)} files with features.\")", "TEST_LOGGER.debug(f\"Generated {len(output_files)} dataset summary files.\") feature_counts = [] for filename in output_files: feature_count", "import pandas as pd import pytest from whylogs.app.config import SessionConfig, WriterConfig from whylogs.app.session", "size by iteratively appending to self 2 times for _ in range(2): full_df", "so create a test df with 4x size by iteratively appending to self", "# Log a larger dataframe to increase chance of rotation before seeing all", "get a chance at hitting the bug. 
csv_path = os.path.join(script_dir, \"lending_club_1000.csv\") full_df =", "assert len(output_files) > 0, \"No output files were generated during stress test\" TEST_LOGGER.debug(f\"Generated", "len(data) == 1: return True first = data[0] for element in iter(data): assert", "by iteratively appending to self 2 times for _ in range(2): full_df =", "= WriterConfig(\"local\", [\"json\"], test_path.realpath(), filename_template=\"dataset_summary-$dataset_timestamp\") # Load the full lending club 1000 csv,", "full_df has shape (1000, 151) so create a test df with 4x size", "element in iter(data): assert first[0] == element[0], f\"Found differing feature counts: {first[0]} vs", "import getLogger from shutil import rmtree from time import sleep from typing import", "to self 2 times for _ in range(2): full_df = full_df.append(full_df) TEST_LOGGER.info(f\"test dataframe", "before seeing all columns sleep(sleep_interval) ylog.log_dataframe(full_df.head(n=2)) # Log a smaller dataframe to get", "of rotation before seeing all columns sleep(sleep_interval) ylog.log_dataframe(full_df.head(n=2)) # Log a smaller dataframe", "output_files += [full_file_path] assert len(output_files) > 0, \"No output files were generated during", "cProfile import json import os import pstats from logging import getLogger from shutil", "first = data[0] for element in iter(data): assert first[0] == element[0], f\"Found differing", "has shape {full_df.shape}\") # Create a whylogs logging session session_config = SessionConfig(\"project\", \"pipeline\",", "sleep(sleep_interval) profiler.disable() stats = pstats.Stats(profiler).sort_stats(\"cumulative\") TEST_LOGGER.info(stats.print_stats(10)) output_files = [] for root, subdir, file_names", "full_df = full_df.append(full_df) TEST_LOGGER.info(f\"test dataframe has shape {full_df.shape}\") # Create a whylogs logging", "= os.path.join(root, file) output_files += [full_file_path] assert len(output_files) > 0, \"No output files", "not a json file but trying to open it to count features\") profile", "or len(data) == 1: return True first = data[0] for element in iter(data):", "at hitting the bug. 
csv_path = os.path.join(script_dir, \"lending_club_1000.csv\") full_df = pd.read_csv(csv_path) # full_df", "= pstats.Stats(profiler).sort_stats(\"cumulative\") TEST_LOGGER.info(stats.print_stats(10)) output_files = [] for root, subdir, file_names in os.walk(test_path): if", "ValueError(f\"{json_profile_filename} is not a json file but trying to open it to count", "for root, subdir, file_names in os.walk(test_path): if not file_names: continue if subdir: for", "True first = data[0] for element in iter(data): assert first[0] == element[0], f\"Found", "(1000, 151) so create a test df with 4x size by iteratively appending", "if profile and profile.get(\"columns\"): return len(profile[\"columns\"].keys()) return 0 def get_json_profile(json_profile_filename): profile = {}", "in iter(data): assert first[0] == element[0], f\"Found differing feature counts: {first[0]} vs {element[0]}", "a whylogs logging session session_config = SessionConfig(\"project\", \"pipeline\", writers=[writer_config]) session = session_from_config(session_config) TEST_LOGGER.info(f\"Running", "output_files += [full_file_path] else: for file in file_names: full_file_path = os.path.join(root, file) output_files", "logging import getLogger from shutil import rmtree from time import sleep from typing", "{element[1]}\" @pytest.mark.load def test_log_rotation_concurrency(tmpdir): log_rotation_interval = \"1s\" sleep_interval = 2 test_path = tmpdir.mkdir(\"log_rotation_concurrency_repro\")", "os.path.exists(json_profile_filename) and os.stat(json_profile_filename).st_size > 0: with open(json_profile_filename) as profile_file: profile = json.load(profile_file) return", "aggressive log rotation but not all empty!\" TEST_LOGGER.info(f\"Feature counts all same, first file", "empty!\" TEST_LOGGER.info(f\"Feature counts all same, first file with features was {feature_counts[0]}\") TEST_LOGGER.debug(f\"There were", "a json file but trying to open it to count features\") profile =", "assert len(feature_counts) > 0, f\"feature counts are all empty, we expect some empty", "full_df.append(full_df) TEST_LOGGER.info(f\"test dataframe has shape {full_df.shape}\") # Create a whylogs logging session session_config", "feature_counts.append((count_features(filename), filename)) assert len(feature_counts) > 0, f\"feature counts are all empty, we expect", "has shape (1000, 151) so create a test df with 4x size by", "{first[1]} and {element[1]}\" @pytest.mark.load def test_log_rotation_concurrency(tmpdir): log_rotation_interval = \"1s\" sleep_interval = 2 test_path", "full_df = pd.read_csv(csv_path) # full_df has shape (1000, 151) so create a test", "counts all same, first file with features was {feature_counts[0]}\") TEST_LOGGER.debug(f\"There were {len(feature_counts)} files", "= getLogger(__name__) def count_features(json_profile_filename): if not os.path.isfile(json_profile_filename): raise ValueError(f\"{json_profile_filename} is not a json", "whylogs.app.config import SessionConfig, WriterConfig from whylogs.app.session import session_from_config script_dir = os.path.dirname(os.path.realpath(__file__)) TEST_LOGGER =", "SessionConfig(\"project\", \"pipeline\", writers=[writer_config]) session = session_from_config(session_config) TEST_LOGGER.info(f\"Running rotate log test with {log_rotation_interval} flush", "151) so create a test df with 4x size by iteratively appending to", "test with {log_rotation_interval} flush intervals and {sleep_interval}s pause\") profiler = cProfile.Profile() profiler.enable() with", "a chance at hitting the bug. 
csv_path = os.path.join(script_dir, \"lending_club_1000.csv\") full_df = pd.read_csv(csv_path)", "lending club 1000 csv, to get a chance at hitting the bug. csv_path", "{len(feature_counts)} files with features.\") assert_all_elements_equal(feature_counts) rmtree(test_path, ignore_errors=True) TEST_LOGGER.debug(f\"End cleaning up test directory {test_path}\")", "Log a smaller dataframe to get more features before rotation sleep(sleep_interval) profiler.disable() stats", "0: with open(json_profile_filename) as profile_file: profile = json.load(profile_file) return profile def assert_all_elements_equal(data: List):", "session_from_config(session_config) TEST_LOGGER.info(f\"Running rotate log test with {log_rotation_interval} flush intervals and {sleep_interval}s pause\") profiler", "+= [full_file_path] assert len(output_files) > 0, \"No output files were generated during stress", "os.path.join(root, file) output_files += [full_file_path] assert len(output_files) > 0, \"No output files were", "from typing import List import pandas as pd import pytest from whylogs.app.config import", "to get more features before rotation sleep(sleep_interval) profiler.disable() stats = pstats.Stats(profiler).sort_stats(\"cumulative\") TEST_LOGGER.info(stats.print_stats(10)) output_files", "import SessionConfig, WriterConfig from whylogs.app.session import session_from_config script_dir = os.path.dirname(os.path.realpath(__file__)) TEST_LOGGER = getLogger(__name__)", "first[0] == element[0], f\"Found differing feature counts: {first[0]} vs {element[0]} in files {first[1]}", "json import os import pstats from logging import getLogger from shutil import rmtree", "[full_file_path] assert len(output_files) > 0, \"No output files were generated during stress test\"", "columns sleep(sleep_interval) ylog.log_dataframe(full_df.head(n=2)) # Log a smaller dataframe to get more features before", "full_file_path = os.path.join(root, file) output_files += [full_file_path] assert len(output_files) > 0, \"No output", "1000 csv, to get a chance at hitting the bug. 
csv_path = os.path.join(script_dir,", "in files {first[1]} and {element[1]}\" @pytest.mark.load def test_log_rotation_concurrency(tmpdir): log_rotation_interval = \"1s\" sleep_interval =", "profiler.enable() with session.logger(tags={\"datasetId\": \"model-1\"}, with_rotation_time=log_rotation_interval) as ylog: ylog.log_dataframe(full_df) # Log a larger dataframe", "f\"Found differing feature counts: {first[0]} vs {element[0]} in files {first[1]} and {element[1]}\" @pytest.mark.load", "{full_df.shape}\") # Create a whylogs logging session session_config = SessionConfig(\"project\", \"pipeline\", writers=[writer_config]) session", "profile = json.load(profile_file) return profile def assert_all_elements_equal(data: List): if not data or len(data)", "subdir: for file in file_names: full_file_path = os.path.join(root, directory, file) output_files += [full_file_path]", "not os.path.isfile(json_profile_filename): raise ValueError(f\"{json_profile_filename} is not a json file but trying to open", "2 test_path = tmpdir.mkdir(\"log_rotation_concurrency_repro\") writer_config = WriterConfig(\"local\", [\"json\"], test_path.realpath(), filename_template=\"dataset_summary-$dataset_timestamp\") # Load the", "List): if not data or len(data) == 1: return True first = data[0]", "pstats.Stats(profiler).sort_stats(\"cumulative\") TEST_LOGGER.info(stats.print_stats(10)) output_files = [] for root, subdir, file_names in os.walk(test_path): if not", "all columns sleep(sleep_interval) ylog.log_dataframe(full_df.head(n=2)) # Log a smaller dataframe to get more features", "import os import pstats from logging import getLogger from shutil import rmtree from", "whylogs.app.session import session_from_config script_dir = os.path.dirname(os.path.realpath(__file__)) TEST_LOGGER = getLogger(__name__) def count_features(json_profile_filename): if not", "stats = pstats.Stats(profiler).sort_stats(\"cumulative\") TEST_LOGGER.info(stats.print_stats(10)) output_files = [] for root, subdir, file_names in os.walk(test_path):", "= \"1s\" sleep_interval = 2 test_path = tmpdir.mkdir(\"log_rotation_concurrency_repro\") writer_config = WriterConfig(\"local\", [\"json\"], test_path.realpath(),", "output files were generated during stress test\" TEST_LOGGER.debug(f\"Generated {len(output_files)} dataset summary files.\") feature_counts", "not all empty!\" TEST_LOGGER.info(f\"Feature counts all same, first file with features was {feature_counts[0]}\")", "whylogs logging session session_config = SessionConfig(\"project\", \"pipeline\", writers=[writer_config]) session = session_from_config(session_config) TEST_LOGGER.info(f\"Running rotate", "[full_file_path] else: for file in file_names: full_file_path = os.path.join(root, file) output_files += [full_file_path]", "pd.read_csv(csv_path) # full_df has shape (1000, 151) so create a test df with", "for file in file_names: full_file_path = os.path.join(root, directory, file) output_files += [full_file_path] else:", "but trying to open it to count features\") profile = get_json_profile(json_profile_filename) if profile", "{} if os.path.exists(json_profile_filename) and os.stat(json_profile_filename).st_size > 0: with open(json_profile_filename) as profile_file: profile =", "seeing all columns sleep(sleep_interval) ylog.log_dataframe(full_df.head(n=2)) # Log a smaller dataframe to get more", "subdir, file_names in os.walk(test_path): if not file_names: continue if subdir: for directory in", "TEST_LOGGER.info(f\"Feature counts all same, first file with features was {feature_counts[0]}\") 
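# A minimal illustration of the helper above (hypothetical data, not executed
# by the test): assert_all_elements_equal compares only the first element of
# each (feature_count, filename) tuple.
#
#   assert_all_elements_equal([(151, "a.json"), (151, "b.json")])  # passes
#   assert_all_elements_equal([(151, "a.json"), (150, "b.json")])  # AssertionError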
TEST_LOGGER.debug(f\"There were {len(feature_counts)}", "os.path.join(script_dir, \"lending_club_1000.csv\") full_df = pd.read_csv(csv_path) # full_df has shape (1000, 151) so create", "# Load the full lending club 1000 csv, to get a chance at", "open(json_profile_filename) as profile_file: profile = json.load(profile_file) return profile def assert_all_elements_equal(data: List): if not", "writer_config = WriterConfig(\"local\", [\"json\"], test_path.realpath(), filename_template=\"dataset_summary-$dataset_timestamp\") # Load the full lending club 1000", "profile.get(\"columns\"): return len(profile[\"columns\"].keys()) return 0 def get_json_profile(json_profile_filename): profile = {} if os.path.exists(json_profile_filename) and", "count_features(json_profile_filename): if not os.path.isfile(json_profile_filename): raise ValueError(f\"{json_profile_filename} is not a json file but trying", "logging session session_config = SessionConfig(\"project\", \"pipeline\", writers=[writer_config]) session = session_from_config(session_config) TEST_LOGGER.info(f\"Running rotate log", "+= [full_file_path] else: for file in file_names: full_file_path = os.path.join(root, file) output_files +=", "= cProfile.Profile() profiler.enable() with session.logger(tags={\"datasetId\": \"model-1\"}, with_rotation_time=log_rotation_interval) as ylog: ylog.log_dataframe(full_df) # Log a", "= full_df.append(full_df) TEST_LOGGER.info(f\"test dataframe has shape {full_df.shape}\") # Create a whylogs logging session", "flush intervals and {sleep_interval}s pause\") profiler = cProfile.Profile() profiler.enable() with session.logger(tags={\"datasetId\": \"model-1\"}, with_rotation_time=log_rotation_interval)", "to increase chance of rotation before seeing all columns sleep(sleep_interval) ylog.log_dataframe(full_df.head(n=2)) # Log", "sleep_interval = 2 test_path = tmpdir.mkdir(\"log_rotation_concurrency_repro\") writer_config = WriterConfig(\"local\", [\"json\"], test_path.realpath(), filename_template=\"dataset_summary-$dataset_timestamp\") #", "with features was {feature_counts[0]}\") TEST_LOGGER.debug(f\"There were {len(feature_counts)} files with features.\") assert_all_elements_equal(feature_counts) rmtree(test_path, ignore_errors=True)", "session_from_config script_dir = os.path.dirname(os.path.realpath(__file__)) TEST_LOGGER = getLogger(__name__) def count_features(json_profile_filename): if not os.path.isfile(json_profile_filename): raise", "as profile_file: profile = json.load(profile_file) return profile def assert_all_elements_equal(data: List): if not data", "a larger dataframe to increase chance of rotation before seeing all columns sleep(sleep_interval)", "_ in range(2): full_df = full_df.append(full_df) TEST_LOGGER.info(f\"test dataframe has shape {full_df.shape}\") # Create", "rotation but not all empty!\" TEST_LOGGER.info(f\"Feature counts all same, first file with features", "profile and profile.get(\"columns\"): return len(profile[\"columns\"].keys()) return 0 def get_json_profile(json_profile_filename): profile = {} if", "import rmtree from time import sleep from typing import List import pandas as", "== element[0], f\"Found differing feature counts: {first[0]} vs {element[0]} in files {first[1]} and", "during stress test\" TEST_LOGGER.debug(f\"Generated {len(output_files)} dataset summary files.\") feature_counts = [] for filename", "0 def get_json_profile(json_profile_filename): profile = {} if os.path.exists(json_profile_filename) and os.stat(json_profile_filename).st_size > 0: with", "the full 
lending club 1000 csv, to get a chance at hitting the", "for directory in subdir: for file in file_names: full_file_path = os.path.join(root, directory, file)", "> 0: feature_counts.append((count_features(filename), filename)) assert len(feature_counts) > 0, f\"feature counts are all empty,", "get more features before rotation sleep(sleep_interval) profiler.disable() stats = pstats.Stats(profiler).sort_stats(\"cumulative\") TEST_LOGGER.info(stats.print_stats(10)) output_files =", "Create a whylogs logging session session_config = SessionConfig(\"project\", \"pipeline\", writers=[writer_config]) session = session_from_config(session_config)", "test_log_rotation_concurrency(tmpdir): log_rotation_interval = \"1s\" sleep_interval = 2 test_path = tmpdir.mkdir(\"log_rotation_concurrency_repro\") writer_config = WriterConfig(\"local\",", "file_names in os.walk(test_path): if not file_names: continue if subdir: for directory in subdir:", "full lending club 1000 csv, to get a chance at hitting the bug.", "profiler = cProfile.Profile() profiler.enable() with session.logger(tags={\"datasetId\": \"model-1\"}, with_rotation_time=log_rotation_interval) as ylog: ylog.log_dataframe(full_df) # Log", "dataframe to get more features before rotation sleep(sleep_interval) profiler.disable() stats = pstats.Stats(profiler).sort_stats(\"cumulative\") TEST_LOGGER.info(stats.print_stats(10))", "os.path.isfile(json_profile_filename): raise ValueError(f\"{json_profile_filename} is not a json file but trying to open it", "file) output_files += [full_file_path] assert len(output_files) > 0, \"No output files were generated", "def get_json_profile(json_profile_filename): profile = {} if os.path.exists(json_profile_filename) and os.stat(json_profile_filename).st_size > 0: with open(json_profile_filename)", "hitting the bug. 
csv_path = os.path.join(script_dir, \"lending_club_1000.csv\") full_df = pd.read_csv(csv_path) # full_df has", "feature_count = count_features(filename) if feature_count > 0: feature_counts.append((count_features(filename), filename)) assert len(feature_counts) > 0,", "pandas as pd import pytest from whylogs.app.config import SessionConfig, WriterConfig from whylogs.app.session import", "counts are all empty, we expect some empty files with aggressive log rotation", "raise ValueError(f\"{json_profile_filename} is not a json file but trying to open it to", "continue if subdir: for directory in subdir: for file in file_names: full_file_path =", "element[0], f\"Found differing feature counts: {first[0]} vs {element[0]} in files {first[1]} and {element[1]}\"", "test_path = tmpdir.mkdir(\"log_rotation_concurrency_repro\") writer_config = WriterConfig(\"local\", [\"json\"], test_path.realpath(), filename_template=\"dataset_summary-$dataset_timestamp\") # Load the full", "\"lending_club_1000.csv\") full_df = pd.read_csv(csv_path) # full_df has shape (1000, 151) so create a", "output_files = [] for root, subdir, file_names in os.walk(test_path): if not file_names: continue", "subdir: for directory in subdir: for file in file_names: full_file_path = os.path.join(root, directory,", "feature_counts = [] for filename in output_files: feature_count = count_features(filename) if feature_count >", "rotate log test with {log_rotation_interval} flush intervals and {sleep_interval}s pause\") profiler = cProfile.Profile()", "= 2 test_path = tmpdir.mkdir(\"log_rotation_concurrency_repro\") writer_config = WriterConfig(\"local\", [\"json\"], test_path.realpath(), filename_template=\"dataset_summary-$dataset_timestamp\") # Load", "log rotation but not all empty!\" TEST_LOGGER.info(f\"Feature counts all same, first file with", "rotation before seeing all columns sleep(sleep_interval) ylog.log_dataframe(full_df.head(n=2)) # Log a smaller dataframe to", "summary files.\") feature_counts = [] for filename in output_files: feature_count = count_features(filename) if", "= tmpdir.mkdir(\"log_rotation_concurrency_repro\") writer_config = WriterConfig(\"local\", [\"json\"], test_path.realpath(), filename_template=\"dataset_summary-$dataset_timestamp\") # Load the full lending", "for filename in output_files: feature_count = count_features(filename) if feature_count > 0: feature_counts.append((count_features(filename), filename))", "files.\") feature_counts = [] for filename in output_files: feature_count = count_features(filename) if feature_count", "from logging import getLogger from shutil import rmtree from time import sleep from", "os.walk(test_path): if not file_names: continue if subdir: for directory in subdir: for file", "profile_file: profile = json.load(profile_file) return profile def assert_all_elements_equal(data: List): if not data or", "and profile.get(\"columns\"): return len(profile[\"columns\"].keys()) return 0 def get_json_profile(json_profile_filename): profile = {} if os.path.exists(json_profile_filename)", "import cProfile import json import os import pstats from logging import getLogger from", "all empty!\" TEST_LOGGER.info(f\"Feature counts all same, first file with features was {feature_counts[0]}\") TEST_LOGGER.debug(f\"There", "directory, file) output_files += [full_file_path] else: for file in file_names: full_file_path = os.path.join(root,", "session.logger(tags={\"datasetId\": \"model-1\"}, with_rotation_time=log_rotation_interval) as ylog: ylog.log_dataframe(full_df) # Log a larger 
dataframe to increase", "expect some empty files with aggressive log rotation but not all empty!\" TEST_LOGGER.info(f\"Feature", "smaller dataframe to get more features before rotation sleep(sleep_interval) profiler.disable() stats = pstats.Stats(profiler).sort_stats(\"cumulative\")", "in subdir: for file in file_names: full_file_path = os.path.join(root, directory, file) output_files +=", "file in file_names: full_file_path = os.path.join(root, directory, file) output_files += [full_file_path] else: for", "= {} if os.path.exists(json_profile_filename) and os.stat(json_profile_filename).st_size > 0: with open(json_profile_filename) as profile_file: profile", "range(2): full_df = full_df.append(full_df) TEST_LOGGER.info(f\"test dataframe has shape {full_df.shape}\") # Create a whylogs", "\"No output files were generated during stress test\" TEST_LOGGER.debug(f\"Generated {len(output_files)} dataset summary files.\")", "= os.path.join(script_dir, \"lending_club_1000.csv\") full_df = pd.read_csv(csv_path) # full_df has shape (1000, 151) so", "files {first[1]} and {element[1]}\" @pytest.mark.load def test_log_rotation_concurrency(tmpdir): log_rotation_interval = \"1s\" sleep_interval = 2", "increase chance of rotation before seeing all columns sleep(sleep_interval) ylog.log_dataframe(full_df.head(n=2)) # Log a", "it to count features\") profile = get_json_profile(json_profile_filename) if profile and profile.get(\"columns\"): return len(profile[\"columns\"].keys())", "cProfile.Profile() profiler.enable() with session.logger(tags={\"datasetId\": \"model-1\"}, with_rotation_time=log_rotation_interval) as ylog: ylog.log_dataframe(full_df) # Log a larger", "return True first = data[0] for element in iter(data): assert first[0] == element[0],", "before rotation sleep(sleep_interval) profiler.disable() stats = pstats.Stats(profiler).sort_stats(\"cumulative\") TEST_LOGGER.info(stats.print_stats(10)) output_files = [] for root,", "log_rotation_interval = \"1s\" sleep_interval = 2 test_path = tmpdir.mkdir(\"log_rotation_concurrency_repro\") writer_config = WriterConfig(\"local\", [\"json\"],", "vs {element[0]} in files {first[1]} and {element[1]}\" @pytest.mark.load def test_log_rotation_concurrency(tmpdir): log_rotation_interval = \"1s\"", "else: for file in file_names: full_file_path = os.path.join(root, file) output_files += [full_file_path] assert", "the bug. 
csv_path = os.path.join(script_dir, \"lending_club_1000.csv\") full_df = pd.read_csv(csv_path) # full_df has shape", "{element[0]} in files {first[1]} and {element[1]}\" @pytest.mark.load def test_log_rotation_concurrency(tmpdir): log_rotation_interval = \"1s\" sleep_interval", "for _ in range(2): full_df = full_df.append(full_df) TEST_LOGGER.info(f\"test dataframe has shape {full_df.shape}\") #", "with_rotation_time=log_rotation_interval) as ylog: ylog.log_dataframe(full_df) # Log a larger dataframe to increase chance of", "file_names: full_file_path = os.path.join(root, file) output_files += [full_file_path] assert len(output_files) > 0, \"No", "file_names: full_file_path = os.path.join(root, directory, file) output_files += [full_file_path] else: for file in", "@pytest.mark.load def test_log_rotation_concurrency(tmpdir): log_rotation_interval = \"1s\" sleep_interval = 2 test_path = tmpdir.mkdir(\"log_rotation_concurrency_repro\") writer_config", "ylog.log_dataframe(full_df) # Log a larger dataframe to increase chance of rotation before seeing", "[] for filename in output_files: feature_count = count_features(filename) if feature_count > 0: feature_counts.append((count_features(filename),", "and os.stat(json_profile_filename).st_size > 0: with open(json_profile_filename) as profile_file: profile = json.load(profile_file) return profile", "[] for root, subdir, file_names in os.walk(test_path): if not file_names: continue if subdir:", "rmtree from time import sleep from typing import List import pandas as pd", "self 2 times for _ in range(2): full_df = full_df.append(full_df) TEST_LOGGER.info(f\"test dataframe has", "os.stat(json_profile_filename).st_size > 0: with open(json_profile_filename) as profile_file: profile = json.load(profile_file) return profile def", "iteratively appending to self 2 times for _ in range(2): full_df = full_df.append(full_df)", "log test with {log_rotation_interval} flush intervals and {sleep_interval}s pause\") profiler = cProfile.Profile() profiler.enable()", "import json import os import pstats from logging import getLogger from shutil import", "were generated during stress test\" TEST_LOGGER.debug(f\"Generated {len(output_files)} dataset summary files.\") feature_counts = []", "first file with features was {feature_counts[0]}\") TEST_LOGGER.debug(f\"There were {len(feature_counts)} files with features.\") assert_all_elements_equal(feature_counts)", "> 0: with open(json_profile_filename) as profile_file: profile = json.load(profile_file) return profile def assert_all_elements_equal(data:", "club 1000 csv, to get a chance at hitting the bug. 
csv_path =", "{sleep_interval}s pause\") profiler = cProfile.Profile() profiler.enable() with session.logger(tags={\"datasetId\": \"model-1\"}, with_rotation_time=log_rotation_interval) as ylog: ylog.log_dataframe(full_df)", "to open it to count features\") profile = get_json_profile(json_profile_filename) if profile and profile.get(\"columns\"):", "<filename>tests/component/test_performance_log_dataframe.py import cProfile import json import os import pstats from logging import getLogger", "return len(profile[\"columns\"].keys()) return 0 def get_json_profile(json_profile_filename): profile = {} if os.path.exists(json_profile_filename) and os.stat(json_profile_filename).st_size", "dataframe has shape {full_df.shape}\") # Create a whylogs logging session session_config = SessionConfig(\"project\",", "== 1: return True first = data[0] for element in iter(data): assert first[0]", "open it to count features\") profile = get_json_profile(json_profile_filename) if profile and profile.get(\"columns\"): return", "with 4x size by iteratively appending to self 2 times for _ in", "in file_names: full_file_path = os.path.join(root, directory, file) output_files += [full_file_path] else: for file", "all same, first file with features was {feature_counts[0]}\") TEST_LOGGER.debug(f\"There were {len(feature_counts)} files with", "more features before rotation sleep(sleep_interval) profiler.disable() stats = pstats.Stats(profiler).sort_stats(\"cumulative\") TEST_LOGGER.info(stats.print_stats(10)) output_files = []", "\"model-1\"}, with_rotation_time=log_rotation_interval) as ylog: ylog.log_dataframe(full_df) # Log a larger dataframe to increase chance", "file) output_files += [full_file_path] else: for file in file_names: full_file_path = os.path.join(root, file)", "root, subdir, file_names in os.walk(test_path): if not file_names: continue if subdir: for directory", "with aggressive log rotation but not all empty!\" TEST_LOGGER.info(f\"Feature counts all same, first", "2 times for _ in range(2): full_df = full_df.append(full_df) TEST_LOGGER.info(f\"test dataframe has shape", "empty, we expect some empty files with aggressive log rotation but not all", "assert first[0] == element[0], f\"Found differing feature counts: {first[0]} vs {element[0]} in files", "shutil import rmtree from time import sleep from typing import List import pandas", "csv, to get a chance at hitting the bug. 
csv_path = os.path.join(script_dir, \"lending_club_1000.csv\")", "= os.path.join(root, directory, file) output_files += [full_file_path] else: for file in file_names: full_file_path", "session = session_from_config(session_config) TEST_LOGGER.info(f\"Running rotate log test with {log_rotation_interval} flush intervals and {sleep_interval}s", "import pytest from whylogs.app.config import SessionConfig, WriterConfig from whylogs.app.session import session_from_config script_dir =", "pstats from logging import getLogger from shutil import rmtree from time import sleep", "with open(json_profile_filename) as profile_file: profile = json.load(profile_file) return profile def assert_all_elements_equal(data: List): if", "{first[0]} vs {element[0]} in files {first[1]} and {element[1]}\" @pytest.mark.load def test_log_rotation_concurrency(tmpdir): log_rotation_interval =", "tmpdir.mkdir(\"log_rotation_concurrency_repro\") writer_config = WriterConfig(\"local\", [\"json\"], test_path.realpath(), filename_template=\"dataset_summary-$dataset_timestamp\") # Load the full lending club", "is not a json file but trying to open it to count features\")", "was {feature_counts[0]}\") TEST_LOGGER.debug(f\"There were {len(feature_counts)} files with features.\") assert_all_elements_equal(feature_counts) rmtree(test_path, ignore_errors=True) TEST_LOGGER.debug(f\"End cleaning", "from whylogs.app.config import SessionConfig, WriterConfig from whylogs.app.session import session_from_config script_dir = os.path.dirname(os.path.realpath(__file__)) TEST_LOGGER", "1: return True first = data[0] for element in iter(data): assert first[0] ==", "len(output_files) > 0, \"No output files were generated during stress test\" TEST_LOGGER.debug(f\"Generated {len(output_files)}", "a smaller dataframe to get more features before rotation sleep(sleep_interval) profiler.disable() stats =", "for file in file_names: full_file_path = os.path.join(root, file) output_files += [full_file_path] assert len(output_files)", "for element in iter(data): assert first[0] == element[0], f\"Found differing feature counts: {first[0]}", "return 0 def get_json_profile(json_profile_filename): profile = {} if os.path.exists(json_profile_filename) and os.stat(json_profile_filename).st_size > 0:", "TEST_LOGGER.info(f\"Running rotate log test with {log_rotation_interval} flush intervals and {sleep_interval}s pause\") profiler =", "{feature_counts[0]}\") TEST_LOGGER.debug(f\"There were {len(feature_counts)} files with features.\") assert_all_elements_equal(feature_counts) rmtree(test_path, ignore_errors=True) TEST_LOGGER.debug(f\"End cleaning up", "ylog.log_dataframe(full_df.head(n=2)) # Log a smaller dataframe to get more features before rotation sleep(sleep_interval)", "chance at hitting the bug. 
csv_path = os.path.join(script_dir, \"lending_club_1000.csv\") full_df = pd.read_csv(csv_path) #", "profile def assert_all_elements_equal(data: List): if not data or len(data) == 1: return True", "os.path.dirname(os.path.realpath(__file__)) TEST_LOGGER = getLogger(__name__) def count_features(json_profile_filename): if not os.path.isfile(json_profile_filename): raise ValueError(f\"{json_profile_filename} is not", "create a test df with 4x size by iteratively appending to self 2", "but not all empty!\" TEST_LOGGER.info(f\"Feature counts all same, first file with features was", "List import pandas as pd import pytest from whylogs.app.config import SessionConfig, WriterConfig from", "typing import List import pandas as pd import pytest from whylogs.app.config import SessionConfig,", "import pstats from logging import getLogger from shutil import rmtree from time import", "= session_from_config(session_config) TEST_LOGGER.info(f\"Running rotate log test with {log_rotation_interval} flush intervals and {sleep_interval}s pause\")", "getLogger(__name__) def count_features(json_profile_filename): if not os.path.isfile(json_profile_filename): raise ValueError(f\"{json_profile_filename} is not a json file", "json.load(profile_file) return profile def assert_all_elements_equal(data: List): if not data or len(data) == 1:", "with {log_rotation_interval} flush intervals and {sleep_interval}s pause\") profiler = cProfile.Profile() profiler.enable() with session.logger(tags={\"datasetId\":", "count features\") profile = get_json_profile(json_profile_filename) if profile and profile.get(\"columns\"): return len(profile[\"columns\"].keys()) return 0", "if subdir: for directory in subdir: for file in file_names: full_file_path = os.path.join(root,", "getLogger from shutil import rmtree from time import sleep from typing import List", "iter(data): assert first[0] == element[0], f\"Found differing feature counts: {first[0]} vs {element[0]} in", "> 0, f\"feature counts are all empty, we expect some empty files with", "pd import pytest from whylogs.app.config import SessionConfig, WriterConfig from whylogs.app.session import session_from_config script_dir", "> 0, \"No output files were generated during stress test\" TEST_LOGGER.debug(f\"Generated {len(output_files)} dataset", "def count_features(json_profile_filename): if not os.path.isfile(json_profile_filename): raise ValueError(f\"{json_profile_filename} is not a json file but", "test_path.realpath(), filename_template=\"dataset_summary-$dataset_timestamp\") # Load the full lending club 1000 csv, to get a", "test df with 4x size by iteratively appending to self 2 times for", "count_features(filename) if feature_count > 0: feature_counts.append((count_features(filename), filename)) assert len(feature_counts) > 0, f\"feature counts", "\"pipeline\", writers=[writer_config]) session = session_from_config(session_config) TEST_LOGGER.info(f\"Running rotate log test with {log_rotation_interval} flush intervals", "shape {full_df.shape}\") # Create a whylogs logging session session_config = SessionConfig(\"project\", \"pipeline\", writers=[writer_config])", "csv_path = os.path.join(script_dir, \"lending_club_1000.csv\") full_df = pd.read_csv(csv_path) # full_df has shape (1000, 151)" ]
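# How this test might be run (assumption: the "load" marker is registered in
# this repo's pytest configuration, e.g. in setup.cfg or pytest.ini; plain
# pytest runs may deselect it or warn about an unknown mark):
#
#   pytest -m load tests/component/test_performance_log_dataframe.py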
[ "and the following disclaimer in the documentation # and/or other materials provided with", "SUCH DAMAGE. # ------------------------------------------------------------------------------ from django.contrib.auth.models import User from django.db import models class", "WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND", "CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL", "INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT,", "# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED", "ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #", "DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;", "OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON", "Meta: app_label = 'thing' ordering = ('name',) def __unicode__(self): if hasattr(self.user, 'username'): return", "if hasattr(self.user, 'username'): return '%s - %s' % (self.user.username, self.name) else: return '%s'", "EVEthing team # All rights reserved. # # Redistribution and use in source", "('name',) def __unicode__(self): if hasattr(self.user, 'username'): return '%s - %s' % (self.user.username, self.name)", "documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE", "notice, this # list of conditions and the following disclaimer. # Redistributions in", "THE POSSIBILITY # OF SUCH DAMAGE. # ------------------------------------------------------------------------------ from django.contrib.auth.models import User from", "ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT", "OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS", "in binary form must reproduce the above copyright notice, # this list of", "THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE", "PUBLIC_VISIBILITY = 2 GLOBAL_VISIBILITY = 3 MASTERY_VISIBILITY = 99 VISIBILITY_CHOICES = ( (PRIVATE_VISIBILITY,", "the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND", "the documentation # and/or other materials provided with the distribution. # # THIS", "All rights reserved. # # Redistribution and use in source and binary forms,", "99 VISIBILITY_CHOICES = ( (PRIVATE_VISIBILITY, 'Private'), (PUBLIC_VISIBILITY, 'Public'), (GLOBAL_VISIBILITY, 'Global'), ) user =", "ordering = ('name',) def __unicode__(self): if hasattr(self.user, 'username'): return '%s - %s' %", "ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT #", "this list of conditions and the following disclaimer in the documentation # and/or", "of conditions and the following disclaimer in the documentation # and/or other materials", "IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY", "POSSIBILITY # OF SUCH DAMAGE. # ------------------------------------------------------------------------------ from django.contrib.auth.models import User from django.db", "SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT,", "met: # # Redistributions of source code must retain the above copyright notice,", "EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE", "this # list of conditions and the following disclaimer. 
# Redistributions in binary", "BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF", "use in source and binary forms, with or without modification, # are permitted", "binary forms, with or without modification, # are permitted provided that the following", "are permitted provided that the following conditions are met: # # Redistributions of", "CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY,", "= models.ForeignKey(User, null=True, blank=True) name = models.CharField(max_length=64) visibility = models.IntegerField(default=1, choices=VISIBILITY_CHOICES) class Meta:", "# Copyright (c) 2010-2013, EVEthing team # All rights reserved. # # Redistribution", "list of conditions and the following disclaimer in the documentation # and/or other", "the following conditions are met: # # Redistributions of source code must retain", "HOLDERS AND CONTRIBUTORS \"AS IS\" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,", "# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,", "BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN", "LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)", "and binary forms, with or without modification, # are permitted provided that the", "# list of conditions and the following disclaimer. # Redistributions in binary form", "# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #", "OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS", "DAMAGE. # ------------------------------------------------------------------------------ from django.contrib.auth.models import User from django.db import models class SkillPlan(models.Model):", "# this list of conditions and the following disclaimer in the documentation #", "app_label = 'thing' ordering = ('name',) def __unicode__(self): if hasattr(self.user, 'username'): return '%s", "NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,", "# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED", "OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER", "OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY", "# OF SUCH DAMAGE. # ------------------------------------------------------------------------------ from django.contrib.auth.models import User from django.db import", "models.IntegerField(default=1, choices=VISIBILITY_CHOICES) class Meta: app_label = 'thing' ordering = ('name',) def __unicode__(self): if", "AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE", "FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE", "IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY", "code must retain the above copyright notice, this # list of conditions and", "THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE", "models.ForeignKey(User, null=True, blank=True) name = models.CharField(max_length=64) visibility = models.IntegerField(default=1, choices=VISIBILITY_CHOICES) class Meta: app_label", "SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND #", "of conditions and the following disclaimer. # Redistributions in binary form must reproduce", "and/or other materials provided with the distribution. 
# # THIS SOFTWARE IS PROVIDED", "INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS", "(c) 2010-2013, EVEthing team # All rights reserved. # # Redistribution and use", "OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY #", "name = models.CharField(max_length=64) visibility = models.IntegerField(default=1, choices=VISIBILITY_CHOICES) class Meta: app_label = 'thing' ordering", "Redistributions in binary form must reproduce the above copyright notice, # this list", "with or without modification, # are permitted provided that the following conditions are", "\"AS IS\" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED", "provided that the following conditions are met: # # Redistributions of source code", "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR", "must reproduce the above copyright notice, # this list of conditions and the", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN", "3 MASTERY_VISIBILITY = 99 VISIBILITY_CHOICES = ( (PRIVATE_VISIBILITY, 'Private'), (PUBLIC_VISIBILITY, 'Public'), (GLOBAL_VISIBILITY, 'Global'),", "BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES", "# All rights reserved. # # Redistribution and use in source and binary", "and the following disclaimer. # Redistributions in binary form must reproduce the above", "OF SUCH DAMAGE. # ------------------------------------------------------------------------------ from django.contrib.auth.models import User from django.db import models", "EVEN IF ADVISED OF THE POSSIBILITY # OF SUCH DAMAGE. # ------------------------------------------------------------------------------ from", "CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR", "disclaimer in the documentation # and/or other materials provided with the distribution. #", "from django.db import models class SkillPlan(models.Model): PRIVATE_VISIBILITY = 1 PUBLIC_VISIBILITY = 2 GLOBAL_VISIBILITY", "# are permitted provided that the following conditions are met: # # Redistributions", "the following disclaimer. # Redistributions in binary form must reproduce the above copyright", "'Global'), ) user = models.ForeignKey(User, null=True, blank=True) name = models.CharField(max_length=64) visibility = models.IntegerField(default=1,", "# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"", "of source code must retain the above copyright notice, this # list of", "( (PRIVATE_VISIBILITY, 'Private'), (PUBLIC_VISIBILITY, 'Public'), (GLOBAL_VISIBILITY, 'Global'), ) user = models.ForeignKey(User, null=True, blank=True)", "# ------------------------------------------------------------------------------ from django.contrib.auth.models import User from django.db import models class SkillPlan(models.Model): PRIVATE_VISIBILITY", "PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS", "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF", "CONTRIBUTORS \"AS IS\" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT", "= 99 VISIBILITY_CHOICES = ( (PRIVATE_VISIBILITY, 'Private'), (PUBLIC_VISIBILITY, 'Public'), (GLOBAL_VISIBILITY, 'Global'), ) user", "FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT", "(PRIVATE_VISIBILITY, 'Private'), (PUBLIC_VISIBILITY, 'Public'), (GLOBAL_VISIBILITY, 'Global'), ) user = models.ForeignKey(User, null=True, blank=True) name", "DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE", "NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,", "following conditions are met: # # Redistributions of source code must retain the", "LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT", "# ------------------------------------------------------------------------------ # Copyright (c) 2010-2013, EVEthing team # All rights reserved. #", "forms, with or without modification, # are permitted provided that the following conditions", "AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR", "from django.contrib.auth.models import User from django.db import models class SkillPlan(models.Model): PRIVATE_VISIBILITY = 1", "notice, # this list of conditions and the following disclaimer in the documentation", "PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, #", "other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY", "ADVISED OF THE POSSIBILITY # OF SUCH DAMAGE. # ------------------------------------------------------------------------------ from django.contrib.auth.models import", "import models class SkillPlan(models.Model): PRIVATE_VISIBILITY = 1 PUBLIC_VISIBILITY = 2 GLOBAL_VISIBILITY = 3", "IF ADVISED OF THE POSSIBILITY # OF SUCH DAMAGE. # ------------------------------------------------------------------------------ from django.contrib.auth.models", "# # Redistributions of source code must retain the above copyright notice, this", "1 PUBLIC_VISIBILITY = 2 GLOBAL_VISIBILITY = 3 MASTERY_VISIBILITY = 99 VISIBILITY_CHOICES = (", "(GLOBAL_VISIBILITY, 'Global'), ) user = models.ForeignKey(User, null=True, blank=True) name = models.CharField(max_length=64) visibility =", "AND CONTRIBUTORS \"AS IS\" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT", "OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF", ") user = models.ForeignKey(User, null=True, blank=True) name = models.CharField(max_length=64) visibility = models.IntegerField(default=1, choices=VISIBILITY_CHOICES)", "OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER", "TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE", "choices=VISIBILITY_CHOICES) class Meta: app_label = 'thing' ordering = ('name',) def __unicode__(self): if hasattr(self.user,", "in the documentation # and/or other materials provided with the distribution. # #", "conditions are met: # # Redistributions of source code must retain the above", "STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY", "PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR", "disclaimer. # Redistributions in binary form must reproduce the above copyright notice, #", "def __unicode__(self): if hasattr(self.user, 'username'): return '%s - %s' % (self.user.username, self.name) else:", "django.contrib.auth.models import User from django.db import models class SkillPlan(models.Model): PRIVATE_VISIBILITY = 1 PUBLIC_VISIBILITY", "2010-2013, EVEthing team # All rights reserved. 
# # Redistribution and use in", "= ( (PRIVATE_VISIBILITY, 'Private'), (PUBLIC_VISIBILITY, 'Public'), (GLOBAL_VISIBILITY, 'Global'), ) user = models.ForeignKey(User, null=True,", "reserved. # # Redistribution and use in source and binary forms, with or", "IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED", "# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN", "THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY # OF", "class Meta: app_label = 'thing' ordering = ('name',) def __unicode__(self): if hasattr(self.user, 'username'):", "Redistribution and use in source and binary forms, with or without modification, #", "(INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE", "list of conditions and the following disclaimer. # Redistributions in binary form must", "following disclaimer. # Redistributions in binary form must reproduce the above copyright notice,", "copyright notice, this # list of conditions and the following disclaimer. # Redistributions", "OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR", "'thing' ordering = ('name',) def __unicode__(self): if hasattr(self.user, 'username'): return '%s - %s'", "CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY", "WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE", "TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE", "above copyright notice, this # list of conditions and the following disclaimer. #", "the above copyright notice, # this list of conditions and the following disclaimer", "(PUBLIC_VISIBILITY, 'Public'), (GLOBAL_VISIBILITY, 'Global'), ) user = models.ForeignKey(User, null=True, blank=True) name = models.CharField(max_length=64)", "and use in source and binary forms, with or without modification, # are", "Copyright (c) 2010-2013, EVEthing team # All rights reserved. 
# # Redistribution and", "LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR", "(INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS", "COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND # ANY EXPRESS OR IMPLIED WARRANTIES,", "Redistributions of source code must retain the above copyright notice, this # list", "django.db import models class SkillPlan(models.Model): PRIVATE_VISIBILITY = 1 PUBLIC_VISIBILITY = 2 GLOBAL_VISIBILITY =", "SkillPlan(models.Model): PRIVATE_VISIBILITY = 1 PUBLIC_VISIBILITY = 2 GLOBAL_VISIBILITY = 3 MASTERY_VISIBILITY = 99", "BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR", "source code must retain the above copyright notice, this # list of conditions", "user = models.ForeignKey(User, null=True, blank=True) name = models.CharField(max_length=64) visibility = models.IntegerField(default=1, choices=VISIBILITY_CHOICES) class", "NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A", "OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF", "USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY # OF SUCH", "= 3 MASTERY_VISIBILITY = 99 VISIBILITY_CHOICES = ( (PRIVATE_VISIBILITY, 'Private'), (PUBLIC_VISIBILITY, 'Public'), (GLOBAL_VISIBILITY,", "retain the above copyright notice, this # list of conditions and the following", "INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO,", "MASTERY_VISIBILITY = 99 VISIBILITY_CHOICES = ( (PRIVATE_VISIBILITY, 'Private'), (PUBLIC_VISIBILITY, 'Public'), (GLOBAL_VISIBILITY, 'Global'), )", "# and/or other materials provided with the distribution. # # THIS SOFTWARE IS", "OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF", "in source and binary forms, with or without modification, # are permitted provided", "= 1 PUBLIC_VISIBILITY = 2 GLOBAL_VISIBILITY = 3 MASTERY_VISIBILITY = 99 VISIBILITY_CHOICES =", "GLOBAL_VISIBILITY = 3 MASTERY_VISIBILITY = 99 VISIBILITY_CHOICES = ( (PRIVATE_VISIBILITY, 'Private'), (PUBLIC_VISIBILITY, 'Public'),", "VISIBILITY_CHOICES = ( (PRIVATE_VISIBILITY, 'Private'), (PUBLIC_VISIBILITY, 'Public'), (GLOBAL_VISIBILITY, 'Global'), ) user = models.ForeignKey(User,", "TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #", "ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING", "rights reserved. # # Redistribution and use in source and binary forms, with", "OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR", "------------------------------------------------------------------------------ # Copyright (c) 2010-2013, EVEthing team # All rights reserved. # #", "OF THE POSSIBILITY # OF SUCH DAMAGE. # ------------------------------------------------------------------------------ from django.contrib.auth.models import User", "PRIVATE_VISIBILITY = 1 PUBLIC_VISIBILITY = 2 GLOBAL_VISIBILITY = 3 MASTERY_VISIBILITY = 99 VISIBILITY_CHOICES", "ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF", "= models.CharField(max_length=64) visibility = models.IntegerField(default=1, choices=VISIBILITY_CHOICES) class Meta: app_label = 'thing' ordering =", "# Redistributions in binary form must reproduce the above copyright notice, # this", "THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND # ANY EXPRESS OR IMPLIED", "# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,", "provided with the distribution. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT", "EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #", "LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,", "= 'thing' ordering = ('name',) def __unicode__(self): if hasattr(self.user, 'username'): return '%s -", "BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND # ANY EXPRESS OR", "HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,", "OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR", "DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY", "without modification, # are permitted provided that the following conditions are met: #", "DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT", "------------------------------------------------------------------------------ from django.contrib.auth.models import User from django.db import models class SkillPlan(models.Model): PRIVATE_VISIBILITY =", "import User from django.db import models class SkillPlan(models.Model): PRIVATE_VISIBILITY = 1 PUBLIC_VISIBILITY =", "reproduce the above copyright notice, # this list of conditions and the following", "# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. #", "A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER", "source and binary forms, with or without modification, # are permitted provided that", "distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS", "ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE", "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND", "SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS", "with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT", "permitted provided that the following conditions are met: # # Redistributions of source", "# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS", "IS\" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,", "'Private'), (PUBLIC_VISIBILITY, 'Public'), (GLOBAL_VISIBILITY, 'Global'), ) user = models.ForeignKey(User, null=True, blank=True) name =", "that the following conditions are met: # # Redistributions of source code must", "hasattr(self.user, 'username'): return '%s - %s' % (self.user.username, self.name) else: return '%s' %", "binary form must reproduce the above copyright notice, # this list of conditions", "conditions and the following disclaimer in the documentation # and/or other materials provided", "OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO", "User from django.db import models class SkillPlan(models.Model): PRIVATE_VISIBILITY = 1 PUBLIC_VISIBILITY = 2", "or without modification, # are permitted provided that the following conditions are met:", "IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN", "following disclaimer in the documentation # and/or other materials provided with the distribution.", "2 GLOBAL_VISIBILITY = 3 MASTERY_VISIBILITY = 99 VISIBILITY_CHOICES = ( (PRIVATE_VISIBILITY, 'Private'), (PUBLIC_VISIBILITY,", "LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND", "FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT", "OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY # OF SUCH DAMAGE.", "form must reproduce the above copyright notice, # this list of conditions and", "modification, # are permitted provided that the following conditions are met: # #", "# Redistribution and use in source and binary forms, with or without modification,", "= ('name',) def __unicode__(self): if hasattr(self.user, 'username'): return '%s - %s' % (self.user.username,", "team # All rights reserved. # # Redistribution and use in source and", "above copyright notice, # this list of conditions and the following disclaimer in", "THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY # OF SUCH DAMAGE. #", "'Public'), (GLOBAL_VISIBILITY, 'Global'), ) user = models.ForeignKey(User, null=True, blank=True) name = models.CharField(max_length=64) visibility", "= models.IntegerField(default=1, choices=VISIBILITY_CHOICES) class Meta: app_label = 'thing' ordering = ('name',) def __unicode__(self):", "PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND # ANY EXPRESS", "GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION)", "AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL", "IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND # ANY", "'username'): return '%s - %s' % (self.user.username, self.name) else: return '%s' % self.name", "COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL,", "EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES", "HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT", "USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY", "models class SkillPlan(models.Model): PRIVATE_VISIBILITY = 1 PUBLIC_VISIBILITY = 2 GLOBAL_VISIBILITY = 3 MASTERY_VISIBILITY", "__unicode__(self): if hasattr(self.user, 'username'): return '%s - %s' % (self.user.username, self.name) else: return", "must retain the above copyright notice, this # list of conditions and the", "IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.", "INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT", "# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR", "the following disclaimer in the documentation # and/or other materials provided with the", "materials provided with the distribution. 
# # THIS SOFTWARE IS PROVIDED BY THE", "PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS;", "SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED", "NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF", "# # Redistribution and use in source and binary forms, with or without", "OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,", "SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY # OF SUCH DAMAGE. # ------------------------------------------------------------------------------", "blank=True) name = models.CharField(max_length=64) visibility = models.IntegerField(default=1, choices=VISIBILITY_CHOICES) class Meta: app_label = 'thing'", "copyright notice, # this list of conditions and the following disclaimer in the", "ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF", "visibility = models.IntegerField(default=1, choices=VISIBILITY_CHOICES) class Meta: app_label = 'thing' ordering = ('name',) def", "are met: # # Redistributions of source code must retain the above copyright", "= 2 GLOBAL_VISIBILITY = 3 MASTERY_VISIBILITY = 99 VISIBILITY_CHOICES = ( (PRIVATE_VISIBILITY, 'Private'),", "conditions and the following disclaimer. # Redistributions in binary form must reproduce the", "models.CharField(max_length=64) visibility = models.IntegerField(default=1, choices=VISIBILITY_CHOICES) class Meta: app_label = 'thing' ordering = ('name',)", "THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL,", "the above copyright notice, this # list of conditions and the following disclaimer.", "null=True, blank=True) name = models.CharField(max_length=64) visibility = models.IntegerField(default=1, choices=VISIBILITY_CHOICES) class Meta: app_label =", "# Redistributions of source code must retain the above copyright notice, this #", "class SkillPlan(models.Model): PRIVATE_VISIBILITY = 1 PUBLIC_VISIBILITY = 2 GLOBAL_VISIBILITY = 3 MASTERY_VISIBILITY =", "WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING" ]
[ "arr def gap_insertion_sort(arr, start, gap): n = len(arr) for i in range(start, n,", "n//2 while gap >= 1: for start in range(gap): gap_insertion_sort(arr, start, gap) gap", "# space complexity O(1) def shell_sort(arr): n = len(arr) gap = n//2 while", "gap_insertion_sort(arr, start, gap) gap = gap//2 return arr def gap_insertion_sort(arr, start, gap): n", "O(1) def shell_sort(arr): n = len(arr) gap = n//2 while gap >= 1:", "n = len(arr) for i in range(start, n, gap): j = i -", "class Test(unittest.TestCase): def test_shell_sort(self): arr = [3,6,9,7,8,4,2,5,1,9,6] self.assertEqual(shell_sort(arr), [1,2,3,4,5,6,6,7,8,9,9]); if __name__ == \"__main__\":", "unittest # time complexity O(n**2) # space complexity O(1) def shell_sort(arr): n =", "= n//2 while gap >= 1: for start in range(gap): gap_insertion_sort(arr, start, gap)", "= i - gap while (j >= start) and (arr[i] < arr[j]): arr[i],", "space complexity O(1) def shell_sort(arr): n = len(arr) gap = n//2 while gap", "gap = n//2 while gap >= 1: for start in range(gap): gap_insertion_sort(arr, start,", "len(arr) for i in range(start, n, gap): j = i - gap while", "(j >= start) and (arr[i] < arr[j]): arr[i], arr[j] = arr[j], arr[i] i", "arr[i], arr[j] = arr[j], arr[i] i = j j -= gap class Test(unittest.TestCase):", "(arr[i] < arr[j]): arr[i], arr[j] = arr[j], arr[i] i = j j -=", "-= gap class Test(unittest.TestCase): def test_shell_sort(self): arr = [3,6,9,7,8,4,2,5,1,9,6] self.assertEqual(shell_sort(arr), [1,2,3,4,5,6,6,7,8,9,9]); if __name__", "complexity O(1) def shell_sort(arr): n = len(arr) gap = n//2 while gap >=", "import unittest # time complexity O(n**2) # space complexity O(1) def shell_sort(arr): n", "return arr def gap_insertion_sort(arr, start, gap): n = len(arr) for i in range(start,", "arr[j]): arr[i], arr[j] = arr[j], arr[i] i = j j -= gap class", "arr[j], arr[i] i = j j -= gap class Test(unittest.TestCase): def test_shell_sort(self): arr", "j j -= gap class Test(unittest.TestCase): def test_shell_sort(self): arr = [3,6,9,7,8,4,2,5,1,9,6] self.assertEqual(shell_sort(arr), [1,2,3,4,5,6,6,7,8,9,9]);", "O(n**2) # space complexity O(1) def shell_sort(arr): n = len(arr) gap = n//2", "def gap_insertion_sort(arr, start, gap): n = len(arr) for i in range(start, n, gap):", "gap): j = i - gap while (j >= start) and (arr[i] <", "< arr[j]): arr[i], arr[j] = arr[j], arr[i] i = j j -= gap", "while gap >= 1: for start in range(gap): gap_insertion_sort(arr, start, gap) gap =", "gap = gap//2 return arr def gap_insertion_sort(arr, start, gap): n = len(arr) for", "- gap while (j >= start) and (arr[i] < arr[j]): arr[i], arr[j] =", "gap//2 return arr def gap_insertion_sort(arr, start, gap): n = len(arr) for i in", "= len(arr) for i in range(start, n, gap): j = i - gap", "range(gap): gap_insertion_sort(arr, start, gap) gap = gap//2 return arr def gap_insertion_sort(arr, start, gap):", "while (j >= start) and (arr[i] < arr[j]): arr[i], arr[j] = arr[j], arr[i]", "start) and (arr[i] < arr[j]): arr[i], arr[j] = arr[j], arr[i] i = j", "and (arr[i] < arr[j]): arr[i], arr[j] = arr[j], arr[i] i = j j", "arr[j] = arr[j], arr[i] i = j j -= gap class Test(unittest.TestCase): def", "shell_sort(arr): n = len(arr) gap = n//2 while gap >= 1: for start", "time complexity O(n**2) # space complexity O(1) def shell_sort(arr): n = len(arr) gap", "n = len(arr) gap = n//2 while gap >= 1: for start in", "for i in range(start, n, gap): j = i - gap while (j", "i in range(start, n, gap): j = i - gap while (j >=", "len(arr) gap = n//2 while gap >= 1: for start in range(gap): 
gap_insertion_sort(arr,", ">= start) and (arr[i] < arr[j]): arr[i], arr[j] = arr[j], arr[i] i =", "gap while (j >= start) and (arr[i] < arr[j]): arr[i], arr[j] = arr[j],", "in range(start, n, gap): j = i - gap while (j >= start)", "n, gap): j = i - gap while (j >= start) and (arr[i]", "= j j -= gap class Test(unittest.TestCase): def test_shell_sort(self): arr = [3,6,9,7,8,4,2,5,1,9,6] self.assertEqual(shell_sort(arr),", "i = j j -= gap class Test(unittest.TestCase): def test_shell_sort(self): arr = [3,6,9,7,8,4,2,5,1,9,6]", "start, gap): n = len(arr) for i in range(start, n, gap): j =", "range(start, n, gap): j = i - gap while (j >= start) and", "1: for start in range(gap): gap_insertion_sort(arr, start, gap) gap = gap//2 return arr", "start in range(gap): gap_insertion_sort(arr, start, gap) gap = gap//2 return arr def gap_insertion_sort(arr,", "= arr[j], arr[i] i = j j -= gap class Test(unittest.TestCase): def test_shell_sort(self):", "arr[i] i = j j -= gap class Test(unittest.TestCase): def test_shell_sort(self): arr =", "= len(arr) gap = n//2 while gap >= 1: for start in range(gap):", "gap >= 1: for start in range(gap): gap_insertion_sort(arr, start, gap) gap = gap//2", "= gap//2 return arr def gap_insertion_sort(arr, start, gap): n = len(arr) for i", "i - gap while (j >= start) and (arr[i] < arr[j]): arr[i], arr[j]", "for start in range(gap): gap_insertion_sort(arr, start, gap) gap = gap//2 return arr def", "gap) gap = gap//2 return arr def gap_insertion_sort(arr, start, gap): n = len(arr)", "complexity O(n**2) # space complexity O(1) def shell_sort(arr): n = len(arr) gap =", "# time complexity O(n**2) # space complexity O(1) def shell_sort(arr): n = len(arr)", "gap_insertion_sort(arr, start, gap): n = len(arr) for i in range(start, n, gap): j", "Test(unittest.TestCase): def test_shell_sort(self): arr = [3,6,9,7,8,4,2,5,1,9,6] self.assertEqual(shell_sort(arr), [1,2,3,4,5,6,6,7,8,9,9]); if __name__ == \"__main__\": unittest.main()", ">= 1: for start in range(gap): gap_insertion_sort(arr, start, gap) gap = gap//2 return", "in range(gap): gap_insertion_sort(arr, start, gap) gap = gap//2 return arr def gap_insertion_sort(arr, start,", "def shell_sort(arr): n = len(arr) gap = n//2 while gap >= 1: for", "start, gap) gap = gap//2 return arr def gap_insertion_sort(arr, start, gap): n =", "gap): n = len(arr) for i in range(start, n, gap): j = i", "j -= gap class Test(unittest.TestCase): def test_shell_sort(self): arr = [3,6,9,7,8,4,2,5,1,9,6] self.assertEqual(shell_sort(arr), [1,2,3,4,5,6,6,7,8,9,9]); if", "gap class Test(unittest.TestCase): def test_shell_sort(self): arr = [3,6,9,7,8,4,2,5,1,9,6] self.assertEqual(shell_sort(arr), [1,2,3,4,5,6,6,7,8,9,9]); if __name__ ==", "j = i - gap while (j >= start) and (arr[i] < arr[j]):" ]
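The shingles in the row above cover what looks like a complete shell-sort module with a unittest check (a gapped insertion sort applied with halving gaps). A runnable reconstruction assembled from those fragments follows; every statement is drawn from the shingles, and only the spacing and the added comments are mine, so the original file may have differed slightly.

import unittest

# time complexity O(n**2)
# space complexity O(1)
def shell_sort(arr):
    n = len(arr)
    gap = n // 2
    while gap >= 1:
        # Insertion-sort each of the `gap` interleaved sublists, then shrink the gap.
        for start in range(gap):
            gap_insertion_sort(arr, start, gap)
        gap = gap // 2
    return arr

def gap_insertion_sort(arr, start, gap):
    n = len(arr)
    for i in range(start, n, gap):
        # Swap arr[i] backwards through its sublist until it sits in order.
        j = i - gap
        while (j >= start) and (arr[i] < arr[j]):
            arr[i], arr[j] = arr[j], arr[i]
            i = j
            j -= gap

class Test(unittest.TestCase):
    def test_shell_sort(self):
        arr = [3, 6, 9, 7, 8, 4, 2, 5, 1, 9, 6]
        self.assertEqual(shell_sort(arr), [1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 9])

if __name__ == "__main__":
    unittest.main()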
[ "5])) assert parameter.max_date == datetime(2015, 8, 5, 0, 0) assert parameter.min_date == datetime(2015,", "parameter.max == 12.0 assert parameter.min == 1.0 def test_max_min_date(dates_daily): parameter = Parameter(name =", "np.array([1, 2, np.nan, 4, 5])) assert parameter.max_date == datetime(2015, 8, 5, 0, 0)", "code = \"06_00060_00003\", values = np.array([1, 2, 3, 4, 5])) assert parameter.mean ==", "\"\"\" test_parameter ~~~~~~~~~~~~~~~ Tests for `gagepy.parameter` class :copyright: 2015 by <NAME>, see AUTHORS", "see AUTHORS :license: United States Geological Survey (USGS), see LICENSE file \"\"\" import", "5, 0, 0) assert parameter.min_date == datetime(2015, 8, 1, 0, 0) def test_print_parameter_by_not_capturing_stdout(dates_daily):", "105, 107, 112]), units = \"cubic feet per second (Mean)\", code = \"06_00060_00003\")", "second (Mean)\" assert list(parameter.values) == list(np.array([100, 110, 105, 107, 112])) def test_parameter_values_mean_max_min_without_nan(dates_daily): parameter", "8, 1, 0, 0) def test_print_parameter_by_not_capturing_stdout(dates_daily): parameter = Parameter(name = \"Discharge\", dates =", "(Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, 3, 4, 5])) assert parameter.max_date", "\"cubic feet per second (Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, 3,", "datetime import datetime from gagepy.parameter import Parameter def test_parameter_init(dates_daily): parameter = Parameter(name =", "assert parameter.max == 5.0 assert parameter.min == 1.0 def test_parameter_values_mean_max_min_with_nan(dates_daily): parameter = Parameter(name", "second (Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, 3, np.nan, 12])) assert", "values = np.array([100, 110, 105, 107, 112]), units = \"cubic feet per second", ":license: United States Geological Survey (USGS), see LICENSE file \"\"\" import pytest import", "United States Geological Survey (USGS), see LICENSE file \"\"\" import pytest import os", "test_parameter_values_mean_max_min_without_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units = \"cubic feet", "= \"cubic feet per second (Mean)\", code = \"06_00060_00003\", values = np.array([1, 2,", "per second (Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, np.nan, 4, 5]))", "per second (Mean)\" assert list(parameter.values) == list(np.array([100, 110, 105, 107, 112])) def test_parameter_values_mean_max_min_without_nan(dates_daily):", "parameter.mean == 4.5 # sum(values)/len(values) -> 18/4 = 4.5 assert parameter.max == 12.0", "assert list(parameter.values) == list(np.array([100, 110, 105, 107, 112])) def test_parameter_values_mean_max_min_without_nan(dates_daily): parameter = Parameter(name", "parameter.mean == 3.0 assert parameter.max == 5.0 assert parameter.min == 1.0 def test_parameter_values_mean_max_min_with_nan(dates_daily):", "assert parameter.min_date == datetime(2015, 8, 1, 0, 0) def test_max_min_date_with_nan(dates_daily): parameter = Parameter(name", "import Parameter def test_parameter_init(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, values", "# -*- coding: utf-8 -*- \"\"\" test_parameter ~~~~~~~~~~~~~~~ Tests for `gagepy.parameter` class :copyright:", "dates = dates_daily, units = \"cubic feet per second (Mean)\", code = \"06_00060_00003\",", "datetime from gagepy.parameter import Parameter def test_parameter_init(dates_daily): parameter = Parameter(name = \"Discharge\", dates", "105, 107, 112])) def 
test_parameter_values_mean_max_min_without_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily,", "units = \"cubic feet per second (Mean)\", code = \"06_00060_00003\") assert list(parameter.dates) ==", "\"06_00060_00003\", values = np.array([1, 2, 3, np.nan, 12])) assert parameter.mean == 4.5 #", "3, 4, 5])) assert parameter.mean == 3.0 assert parameter.max == 5.0 assert parameter.min", "def test_parameter_init(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, values = np.array([100,", "= np.array([1, 2, 3, 4, 5])) assert parameter.max_date == datetime(2015, 8, 5, 0,", "from gagepy.parameter import Parameter def test_parameter_init(dates_daily): parameter = Parameter(name = \"Discharge\", dates =", "units = \"cubic feet per second (Mean)\", code = \"06_00060_00003\", values = np.array([1,", "values = np.array([1, 2, 3, 4, 5])) assert parameter.mean == 3.0 assert parameter.max", "parameter.units == \"cubic feet per second (Mean)\" assert list(parameter.values) == list(np.array([100, 110, 105,", "== \"cubic feet per second (Mean)\" assert list(parameter.values) == list(np.array([100, 110, 105, 107,", "list(parameter.values) == list(np.array([100, 110, 105, 107, 112])) def test_parameter_values_mean_max_min_without_nan(dates_daily): parameter = Parameter(name =", "def test_print_parameter_by_not_capturing_stdout(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units = \"cubic", "= \"06_00060_00003\", values = np.array([1, 2, np.nan, 4, 5])) assert parameter.max_date == datetime(2015,", "Survey (USGS), see LICENSE file \"\"\" import pytest import os import numpy as", "parameter.max == 5.0 assert parameter.min == 1.0 def test_parameter_values_mean_max_min_with_nan(dates_daily): parameter = Parameter(name =", "== 12.0 assert parameter.min == 1.0 def test_max_min_date(dates_daily): parameter = Parameter(name = \"Discharge\",", "test_print_parameter_by_not_capturing_stdout(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units = \"cubic feet", "feet per second (Mean)\", code = \"06_00060_00003\") assert list(parameter.dates) == list(dates_daily) assert parameter.code", "= dates_daily, values = np.array([100, 110, 105, 107, 112]), units = \"cubic feet", "3.0 assert parameter.max == 5.0 assert parameter.min == 1.0 def test_parameter_values_mean_max_min_with_nan(dates_daily): parameter =", "3, np.nan, 12])) assert parameter.mean == 4.5 # sum(values)/len(values) -> 18/4 = 4.5", "== 3.0 assert parameter.max == 5.0 assert parameter.min == 1.0 def test_parameter_values_mean_max_min_with_nan(dates_daily): parameter", "assert parameter.code == \"06_00060_00003\" assert parameter.name == \"Discharge\" assert parameter.units == \"cubic feet", "(Mean)\" assert list(parameter.values) == list(np.array([100, 110, 105, 107, 112])) def test_parameter_values_mean_max_min_without_nan(dates_daily): parameter =", "def test_parameter_values_mean_max_min_with_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units = \"cubic", "see LICENSE file \"\"\" import pytest import os import numpy as np from", "dates_daily, values = np.array([100, 110, 105, 107, 112]), units = \"cubic feet per", "== 1.0 def test_parameter_values_mean_max_min_with_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units", "2, 3, np.nan, 12])) assert parameter.mean == 4.5 # sum(values)/len(values) -> 18/4 =", "1, 0, 0) def test_max_min_date_with_nan(dates_daily): 
parameter = Parameter(name = \"Discharge\", dates = dates_daily,", "= Parameter(name = \"Discharge\", dates = dates_daily, units = \"cubic feet per second", "107, 112])) def test_parameter_values_mean_max_min_without_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units", "import os import numpy as np from datetime import datetime from gagepy.parameter import", "def test_parameter_values_mean_max_min_without_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units = \"cubic", "list(dates_daily) assert parameter.code == \"06_00060_00003\" assert parameter.name == \"Discharge\" assert parameter.units == \"cubic", "110, 105, 107, 112])) def test_parameter_values_mean_max_min_without_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates =", "by <NAME>, see AUTHORS :license: United States Geological Survey (USGS), see LICENSE file", "<NAME>, see AUTHORS :license: United States Geological Survey (USGS), see LICENSE file \"\"\"", "import datetime from gagepy.parameter import Parameter def test_parameter_init(dates_daily): parameter = Parameter(name = \"Discharge\",", "from datetime import datetime from gagepy.parameter import Parameter def test_parameter_init(dates_daily): parameter = Parameter(name", "datetime(2015, 8, 5, 0, 0) assert parameter.min_date == datetime(2015, 8, 1, 0, 0)", "Parameter(name = \"Discharge\", dates = dates_daily, units = \"cubic feet per second (Mean)\",", "Parameter(name = \"Discharge\", dates = dates_daily, values = np.array([100, 110, 105, 107, 112]),", "\"cubic feet per second (Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, np.nan,", "assert parameter.units == \"cubic feet per second (Mean)\" assert list(parameter.values) == list(np.array([100, 110,", "np.nan, 12])) assert parameter.mean == 4.5 # sum(values)/len(values) -> 18/4 = 4.5 assert", "\"06_00060_00003\", values = np.array([1, 2, np.nan, 4, 5])) assert parameter.max_date == datetime(2015, 8,", "list(parameter.dates) == list(dates_daily) assert parameter.code == \"06_00060_00003\" assert parameter.name == \"Discharge\" assert parameter.units", "5.0 assert parameter.min == 1.0 def test_parameter_values_mean_max_min_with_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates", "dates = dates_daily, values = np.array([100, 110, 105, 107, 112]), units = \"cubic", "parameter.min_date == datetime(2015, 8, 1, 0, 0) def test_print_parameter_by_not_capturing_stdout(dates_daily): parameter = Parameter(name =", "18/4 = 4.5 assert parameter.max == 12.0 assert parameter.min == 1.0 def test_max_min_date(dates_daily):", "assert parameter.min == 1.0 def test_max_min_date(dates_daily): parameter = Parameter(name = \"Discharge\", dates =", "second (Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, 3, 4, 5])) print(parameter)", "test_max_min_date_with_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units = \"cubic feet", "np.array([100, 110, 105, 107, 112]), units = \"cubic feet per second (Mean)\", code", "0) def test_print_parameter_by_not_capturing_stdout(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units =", "import numpy as np from datetime import datetime from gagepy.parameter import Parameter def", "Tests for `gagepy.parameter` class :copyright: 2015 by <NAME>, see AUTHORS :license: United States", "2, 3, 4, 5])) assert parameter.max_date == datetime(2015, 8, 5, 0, 0) assert", "for `gagepy.parameter` class :copyright: 2015 by <NAME>, see 
AUTHORS :license: United States Geological", "2015 by <NAME>, see AUTHORS :license: United States Geological Survey (USGS), see LICENSE", "dates_daily, units = \"cubic feet per second (Mean)\", code = \"06_00060_00003\", values =", "test_parameter_values_mean_max_min_with_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units = \"cubic feet", "= np.array([1, 2, 3, np.nan, 12])) assert parameter.mean == 4.5 # sum(values)/len(values) ->", "= np.array([1, 2, np.nan, 4, 5])) assert parameter.max_date == datetime(2015, 8, 5, 0,", "4.5 assert parameter.max == 12.0 assert parameter.min == 1.0 def test_max_min_date(dates_daily): parameter =", "== datetime(2015, 8, 5, 0, 0) assert parameter.min_date == datetime(2015, 8, 1, 0,", "import pytest import os import numpy as np from datetime import datetime from", "Geological Survey (USGS), see LICENSE file \"\"\" import pytest import os import numpy", "assert parameter.max_date == datetime(2015, 8, 5, 0, 0) assert parameter.min_date == datetime(2015, 8,", "4, 5])) assert parameter.max_date == datetime(2015, 8, 5, 0, 0) assert parameter.min_date ==", "\"06_00060_00003\", values = np.array([1, 2, 3, 4, 5])) assert parameter.max_date == datetime(2015, 8,", "(Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, 3, 4, 5])) assert parameter.mean", "parameter.code == \"06_00060_00003\" assert parameter.name == \"Discharge\" assert parameter.units == \"cubic feet per", "assert parameter.mean == 3.0 assert parameter.max == 5.0 assert parameter.min == 1.0 def", "feet per second (Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, 3, 4,", "5, 0, 0) assert parameter.min_date == datetime(2015, 8, 1, 0, 0) def test_max_min_date_with_nan(dates_daily):", "0) assert parameter.min_date == datetime(2015, 8, 1, 0, 0) def test_print_parameter_by_not_capturing_stdout(dates_daily): parameter =", "def test_max_min_date(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units = \"cubic", "sum(values)/len(values) -> 18/4 = 4.5 assert parameter.max == 12.0 assert parameter.min == 1.0", "\"cubic feet per second (Mean)\", code = \"06_00060_00003\") assert list(parameter.dates) == list(dates_daily) assert", "parameter = Parameter(name = \"Discharge\", dates = dates_daily, units = \"cubic feet per", "numpy as np from datetime import datetime from gagepy.parameter import Parameter def test_parameter_init(dates_daily):", "assert parameter.name == \"Discharge\" assert parameter.units == \"cubic feet per second (Mean)\" assert", "0, 0) assert parameter.min_date == datetime(2015, 8, 1, 0, 0) def test_max_min_date_with_nan(dates_daily): parameter", "(Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, np.nan, 4, 5])) assert parameter.max_date", "code = \"06_00060_00003\") assert list(parameter.dates) == list(dates_daily) assert parameter.code == \"06_00060_00003\" assert parameter.name", ":copyright: 2015 by <NAME>, see AUTHORS :license: United States Geological Survey (USGS), see", "file \"\"\" import pytest import os import numpy as np from datetime import", "= 4.5 assert parameter.max == 12.0 assert parameter.min == 1.0 def test_max_min_date(dates_daily): parameter", "assert list(parameter.dates) == list(dates_daily) assert parameter.code == \"06_00060_00003\" assert parameter.name == \"Discharge\" assert", "parameter.min == 1.0 def test_max_min_date(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily,", "assert parameter.min == 1.0 def 
test_parameter_values_mean_max_min_with_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates =", "code = \"06_00060_00003\", values = np.array([1, 2, np.nan, 4, 5])) assert parameter.max_date ==", "= np.array([100, 110, 105, 107, 112]), units = \"cubic feet per second (Mean)\",", "assert parameter.min_date == datetime(2015, 8, 1, 0, 0) def test_print_parameter_by_not_capturing_stdout(dates_daily): parameter = Parameter(name", "1, 0, 0) def test_print_parameter_by_not_capturing_stdout(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily,", "Parameter def test_parameter_init(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, values =", "# sum(values)/len(values) -> 18/4 = 4.5 assert parameter.max == 12.0 assert parameter.min ==", "\"cubic feet per second (Mean)\" assert list(parameter.values) == list(np.array([100, 110, 105, 107, 112]))", "8, 5, 0, 0) assert parameter.min_date == datetime(2015, 8, 1, 0, 0) def", "per second (Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, 3, 4, 5]))", "\"Discharge\", dates = dates_daily, values = np.array([100, 110, 105, 107, 112]), units =", "coding: utf-8 -*- \"\"\" test_parameter ~~~~~~~~~~~~~~~ Tests for `gagepy.parameter` class :copyright: 2015 by", "\"06_00060_00003\") assert list(parameter.dates) == list(dates_daily) assert parameter.code == \"06_00060_00003\" assert parameter.name == \"Discharge\"", "= \"06_00060_00003\", values = np.array([1, 2, 3, 4, 5])) assert parameter.mean == 3.0", "test_parameter_init(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, values = np.array([100, 110,", "0) def test_max_min_date_with_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units =", "np.array([1, 2, 3, np.nan, 12])) assert parameter.mean == 4.5 # sum(values)/len(values) -> 18/4", "datetime(2015, 8, 1, 0, 0) def test_max_min_date_with_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates", "1.0 def test_parameter_values_mean_max_min_with_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units =", "feet per second (Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, np.nan, 4,", "1.0 def test_max_min_date(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units =", "def test_max_min_date_with_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units = \"cubic", "\"Discharge\" assert parameter.units == \"cubic feet per second (Mean)\" assert list(parameter.values) == list(np.array([100,", "(USGS), see LICENSE file \"\"\" import pytest import os import numpy as np", "code = \"06_00060_00003\", values = np.array([1, 2, 3, np.nan, 12])) assert parameter.mean ==", "= Parameter(name = \"Discharge\", dates = dates_daily, values = np.array([100, 110, 105, 107,", "== 1.0 def test_max_min_date(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units", "= \"cubic feet per second (Mean)\", code = \"06_00060_00003\") assert list(parameter.dates) == list(dates_daily)", "0, 0) def test_max_min_date_with_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units", "2, np.nan, 4, 5])) assert parameter.max_date == datetime(2015, 8, 5, 0, 0) assert", "LICENSE file \"\"\" import pytest import os import numpy as np from datetime", "feet per second (Mean)\" assert list(parameter.values) == list(np.array([100, 110, 105, 107, 112])) def", "parameter.min_date == 
datetime(2015, 8, 1, 0, 0) def test_max_min_date_with_nan(dates_daily): parameter = Parameter(name =", "(Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, 3, np.nan, 12])) assert parameter.mean", "values = np.array([1, 2, 3, 4, 5])) assert parameter.max_date == datetime(2015, 8, 5,", "assert parameter.max == 12.0 assert parameter.min == 1.0 def test_max_min_date(dates_daily): parameter = Parameter(name", "(Mean)\", code = \"06_00060_00003\") assert list(parameter.dates) == list(dates_daily) assert parameter.code == \"06_00060_00003\" assert", "per second (Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, 3, np.nan, 12]))", "5])) assert parameter.mean == 3.0 assert parameter.max == 5.0 assert parameter.min == 1.0", "~~~~~~~~~~~~~~~ Tests for `gagepy.parameter` class :copyright: 2015 by <NAME>, see AUTHORS :license: United", "= \"06_00060_00003\") assert list(parameter.dates) == list(dates_daily) assert parameter.code == \"06_00060_00003\" assert parameter.name ==", "parameter.name == \"Discharge\" assert parameter.units == \"cubic feet per second (Mean)\" assert list(parameter.values)", "12])) assert parameter.mean == 4.5 # sum(values)/len(values) -> 18/4 = 4.5 assert parameter.max", "2, 3, 4, 5])) assert parameter.mean == 3.0 assert parameter.max == 5.0 assert", "-*- coding: utf-8 -*- \"\"\" test_parameter ~~~~~~~~~~~~~~~ Tests for `gagepy.parameter` class :copyright: 2015", "\"\"\" import pytest import os import numpy as np from datetime import datetime", "3, 4, 5])) assert parameter.max_date == datetime(2015, 8, 5, 0, 0) assert parameter.min_date", "-> 18/4 = 4.5 assert parameter.max == 12.0 assert parameter.min == 1.0 def", "as np from datetime import datetime from gagepy.parameter import Parameter def test_parameter_init(dates_daily): parameter", "0, 0) def test_print_parameter_by_not_capturing_stdout(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units", "pytest import os import numpy as np from datetime import datetime from gagepy.parameter", "parameter = Parameter(name = \"Discharge\", dates = dates_daily, values = np.array([100, 110, 105,", "second (Mean)\", code = \"06_00060_00003\") assert list(parameter.dates) == list(dates_daily) assert parameter.code == \"06_00060_00003\"", "AUTHORS :license: United States Geological Survey (USGS), see LICENSE file \"\"\" import pytest", "\"06_00060_00003\", values = np.array([1, 2, 3, 4, 5])) assert parameter.mean == 3.0 assert", "8, 1, 0, 0) def test_max_min_date_with_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates =", "0, 0) assert parameter.min_date == datetime(2015, 8, 1, 0, 0) def test_print_parameter_by_not_capturing_stdout(dates_daily): parameter", "112]), units = \"cubic feet per second (Mean)\", code = \"06_00060_00003\") assert list(parameter.dates)", "4, 5])) assert parameter.mean == 3.0 assert parameter.max == 5.0 assert parameter.min ==", "= \"Discharge\", dates = dates_daily, units = \"cubic feet per second (Mean)\", code", "second (Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, 3, 4, 5])) assert", "110, 105, 107, 112]), units = \"cubic feet per second (Mean)\", code =", "utf-8 -*- \"\"\" test_parameter ~~~~~~~~~~~~~~~ Tests for `gagepy.parameter` class :copyright: 2015 by <NAME>,", "test_parameter ~~~~~~~~~~~~~~~ Tests for `gagepy.parameter` class :copyright: 2015 by <NAME>, see AUTHORS :license:", "\"Discharge\", dates = dates_daily, units = \"cubic feet per second (Mean)\", code =", "per second (Mean)\", code = \"06_00060_00003\") assert 
list(parameter.dates) == list(dates_daily) assert parameter.code ==", "np.nan, 4, 5])) assert parameter.max_date == datetime(2015, 8, 5, 0, 0) assert parameter.min_date", "gagepy.parameter import Parameter def test_parameter_init(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily,", "np.array([1, 2, 3, 4, 5])) assert parameter.mean == 3.0 assert parameter.max == 5.0", "datetime(2015, 8, 1, 0, 0) def test_print_parameter_by_not_capturing_stdout(dates_daily): parameter = Parameter(name = \"Discharge\", dates", "== \"06_00060_00003\" assert parameter.name == \"Discharge\" assert parameter.units == \"cubic feet per second", "= \"Discharge\", dates = dates_daily, values = np.array([100, 110, 105, 107, 112]), units", "States Geological Survey (USGS), see LICENSE file \"\"\" import pytest import os import", "values = np.array([1, 2, np.nan, 4, 5])) assert parameter.max_date == datetime(2015, 8, 5,", "list(np.array([100, 110, 105, 107, 112])) def test_parameter_values_mean_max_min_without_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates", "= np.array([1, 2, 3, 4, 5])) assert parameter.mean == 3.0 assert parameter.max ==", "== list(np.array([100, 110, 105, 107, 112])) def test_parameter_values_mean_max_min_without_nan(dates_daily): parameter = Parameter(name = \"Discharge\",", "feet per second (Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, 3, np.nan,", "parameter.min == 1.0 def test_parameter_values_mean_max_min_with_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily,", "assert parameter.mean == 4.5 # sum(values)/len(values) -> 18/4 = 4.5 assert parameter.max ==", "np.array([1, 2, 3, 4, 5])) assert parameter.max_date == datetime(2015, 8, 5, 0, 0)", "4.5 # sum(values)/len(values) -> 18/4 = 4.5 assert parameter.max == 12.0 assert parameter.min", "== 4.5 # sum(values)/len(values) -> 18/4 = 4.5 assert parameter.max == 12.0 assert", "values = np.array([1, 2, 3, np.nan, 12])) assert parameter.mean == 4.5 # sum(values)/len(values)", "parameter.max_date == datetime(2015, 8, 5, 0, 0) assert parameter.min_date == datetime(2015, 8, 1,", "0) assert parameter.min_date == datetime(2015, 8, 1, 0, 0) def test_max_min_date_with_nan(dates_daily): parameter =", "= \"06_00060_00003\", values = np.array([1, 2, 3, 4, 5])) assert parameter.max_date == datetime(2015,", "107, 112]), units = \"cubic feet per second (Mean)\", code = \"06_00060_00003\") assert", "= \"06_00060_00003\", values = np.array([1, 2, 3, np.nan, 12])) assert parameter.mean == 4.5", "-*- \"\"\" test_parameter ~~~~~~~~~~~~~~~ Tests for `gagepy.parameter` class :copyright: 2015 by <NAME>, see", "== 5.0 assert parameter.min == 1.0 def test_parameter_values_mean_max_min_with_nan(dates_daily): parameter = Parameter(name = \"Discharge\",", "== list(dates_daily) assert parameter.code == \"06_00060_00003\" assert parameter.name == \"Discharge\" assert parameter.units ==", "code = \"06_00060_00003\", values = np.array([1, 2, 3, 4, 5])) assert parameter.max_date ==", "\"06_00060_00003\" assert parameter.name == \"Discharge\" assert parameter.units == \"cubic feet per second (Mean)\"", "== datetime(2015, 8, 1, 0, 0) def test_print_parameter_by_not_capturing_stdout(dates_daily): parameter = Parameter(name = \"Discharge\",", "np from datetime import datetime from gagepy.parameter import Parameter def test_parameter_init(dates_daily): parameter =", "`gagepy.parameter` class :copyright: 2015 by <NAME>, see AUTHORS :license: United States Geological Survey", "112])) def 
test_parameter_values_mean_max_min_without_nan(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units =", "os import numpy as np from datetime import datetime from gagepy.parameter import Parameter", "12.0 assert parameter.min == 1.0 def test_max_min_date(dates_daily): parameter = Parameter(name = \"Discharge\", dates", "= dates_daily, units = \"cubic feet per second (Mean)\", code = \"06_00060_00003\", values", "class :copyright: 2015 by <NAME>, see AUTHORS :license: United States Geological Survey (USGS),", "== \"Discharge\" assert parameter.units == \"cubic feet per second (Mean)\" assert list(parameter.values) ==", "second (Mean)\", code = \"06_00060_00003\", values = np.array([1, 2, np.nan, 4, 5])) assert", "== datetime(2015, 8, 1, 0, 0) def test_max_min_date_with_nan(dates_daily): parameter = Parameter(name = \"Discharge\",", "test_max_min_date(dates_daily): parameter = Parameter(name = \"Discharge\", dates = dates_daily, units = \"cubic feet" ]
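The row above shingles pytest tests for gagepy's Parameter class: with NaN present, the mean, max, min and the max/min dates are all expected to ignore the NaN entries. The Parameter implementation itself is not in the row, so the sketch below only shows, with plain numpy, behaviour consistent with those assertions; the explicit dates stand in for the dates_daily fixture, which the tests imply is a run of daily dates starting 2015-08-01.

import numpy as np
from datetime import datetime, timedelta

# Five daily dates starting 2015-08-01, consistent with the dates_daily fixture.
dates = [datetime(2015, 8, 1) + timedelta(days=i) for i in range(5)]
values = np.array([1, 2, np.nan, 4, 5])

mean = np.nanmean(values)               # (1 + 2 + 4 + 5) / 4 = 3.0, NaN ignored
max_value = np.nanmax(values)           # 5.0
min_value = np.nanmin(values)           # 1.0
max_date = dates[np.nanargmax(values)]  # datetime(2015, 8, 5, 0, 0)
min_date = dates[np.nanargmin(values)]  # datetime(2015, 8, 1, 0, 0)

print(mean, max_value, min_value, max_date, min_date)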
[]
[ "k = [1, 2, 3, 4] for a in k: for b in", "!= b and a != c and a != d and b !=", "d and b != c and b != d and c != d:", "and b != d and c != d: print(\"%s%s%s%s\" % (a, b, c,", "a != b and a != c and a != d and b", "!= d and b != c and b != d and c !=", "全排列2 求1、2、3、4的全排列 \"\"\" k = [1, 2, 3, 4] for a in k:", "d in k: if a != b and a != c and a", "for c in k: for d in k: if a != b and", "b != c and b != d and c != d: print(\"%s%s%s%s\" %", "<gh_stars>1-10 \"\"\" 全排列2 求1、2、3、4的全排列 \"\"\" k = [1, 2, 3, 4] for a", "\"\"\" 全排列2 求1、2、3、4的全排列 \"\"\" k = [1, 2, 3, 4] for a in", "3, 4] for a in k: for b in k: for c in", "求1、2、3、4的全排列 \"\"\" k = [1, 2, 3, 4] for a in k: for", "b != d and c != d: print(\"%s%s%s%s\" % (a, b, c, d))", "a != d and b != c and b != d and c", "c in k: for d in k: if a != b and a", "k: for c in k: for d in k: if a != b", "k: for d in k: if a != b and a != c", "2, 3, 4] for a in k: for b in k: for c", "in k: if a != b and a != c and a !=", "c and b != d and c != d: print(\"%s%s%s%s\" % (a, b,", "and b != c and b != d and c != d: print(\"%s%s%s%s\"", "k: for b in k: for c in k: for d in k:", "4] for a in k: for b in k: for c in k:", "\"\"\" k = [1, 2, 3, 4] for a in k: for b", "in k: for c in k: for d in k: if a !=", "if a != b and a != c and a != d and", "in k: for b in k: for c in k: for d in", "= [1, 2, 3, 4] for a in k: for b in k:", "b and a != c and a != d and b != c", "!= c and b != d and c != d: print(\"%s%s%s%s\" % (a,", "and a != d and b != c and b != d and", "c and a != d and b != c and b != d", "[1, 2, 3, 4] for a in k: for b in k: for", "for b in k: for c in k: for d in k: if", "for a in k: for b in k: for c in k: for", "!= c and a != d and b != c and b !=", "k: if a != b and a != c and a != d", "for d in k: if a != b and a != c and", "b in k: for c in k: for d in k: if a", "a != c and a != d and b != c and b", "and a != c and a != d and b != c and", "in k: for d in k: if a != b and a !=", "a in k: for b in k: for c in k: for d" ]
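The row above comes from a small exercise whose Chinese docstring reads roughly "full permutations 2: enumerate the permutations of 1, 2, 3, 4"; it does so with four nested loops and pairwise inequality checks. The standard-library version below prints the same 24 lines in the same lexicographic order.

from itertools import permutations

# Equivalent to the four nested loops with the a != b, a != c, ... checks:
# permutations() already yields only distinct orderings, in lexicographic order.
for a, b, c, d in permutations([1, 2, 3, 4]):
    print("%s%s%s%s" % (a, b, c, d))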
[ "django.http import HttpResponse from django.template import loader from haitiwater.settings import PROJECT_VERSION, PROJECT_NAME from", "'unpaid_bills': 42, # Todo, but for later as we can't mark a payment", "django.contrib.auth.decorators import login_required from django.http import HttpResponse from django.template import loader from haitiwater.settings", "import HttpResponse from django.template import loader from haitiwater.settings import PROJECT_VERSION, PROJECT_NAME from ..utils.get_data", "= { 'project_version': PROJECT_VERSION, 'project_name': PROJECT_NAME, 'zone_name': get_zone(request), 'current_period': get_current_month_fr(), 'water_outlets': get_outlets(request), 'consumer_groups':", "'current_period': get_current_month_fr(), 'water_outlets': get_outlets(request), 'consumer_groups': get_amount_household(request), 'consumer_individuals': get_total_consumers(request), 'unpaid_bills': 42, # Todo, but", "import PROJECT_VERSION, PROJECT_NAME from ..utils.get_data import * @login_required(login_url='/login/') def index(request): template = loader.get_template('consumers.html')", "but for later as we can't mark a payment yet } return HttpResponse(template.render(context,", "PROJECT_VERSION, PROJECT_NAME from ..utils.get_data import * @login_required(login_url='/login/') def index(request): template = loader.get_template('consumers.html') context", "from haitiwater.settings import PROJECT_VERSION, PROJECT_NAME from ..utils.get_data import * @login_required(login_url='/login/') def index(request): template", "Todo, but for later as we can't mark a payment yet } return", "from django.contrib.auth.decorators import login_required from django.http import HttpResponse from django.template import loader from", "template = loader.get_template('consumers.html') context = { 'project_version': PROJECT_VERSION, 'project_name': PROJECT_NAME, 'zone_name': get_zone(request), 'current_period':", "django.template import loader from haitiwater.settings import PROJECT_VERSION, PROJECT_NAME from ..utils.get_data import * @login_required(login_url='/login/')", "PROJECT_NAME, 'zone_name': get_zone(request), 'current_period': get_current_month_fr(), 'water_outlets': get_outlets(request), 'consumer_groups': get_amount_household(request), 'consumer_individuals': get_total_consumers(request), 'unpaid_bills': 42,", "from ..utils.get_data import * @login_required(login_url='/login/') def index(request): template = loader.get_template('consumers.html') context = {", "from django.http import HttpResponse from django.template import loader from haitiwater.settings import PROJECT_VERSION, PROJECT_NAME", "import loader from haitiwater.settings import PROJECT_VERSION, PROJECT_NAME from ..utils.get_data import * @login_required(login_url='/login/') def", "get_amount_household(request), 'consumer_individuals': get_total_consumers(request), 'unpaid_bills': 42, # Todo, but for later as we can't", "login_required from django.http import HttpResponse from django.template import loader from haitiwater.settings import PROJECT_VERSION,", "'consumer_individuals': get_total_consumers(request), 'unpaid_bills': 42, # Todo, but for later as we can't mark", "loader.get_template('consumers.html') context = { 'project_version': PROJECT_VERSION, 'project_name': PROJECT_NAME, 'zone_name': get_zone(request), 'current_period': get_current_month_fr(), 'water_outlets':", "get_current_month_fr(), 'water_outlets': get_outlets(request), 'consumer_groups': get_amount_household(request), 'consumer_individuals': 
get_total_consumers(request), 'unpaid_bills': 42, # Todo, but for", "from django.template import loader from haitiwater.settings import PROJECT_VERSION, PROJECT_NAME from ..utils.get_data import *", "PROJECT_NAME from ..utils.get_data import * @login_required(login_url='/login/') def index(request): template = loader.get_template('consumers.html') context =", "# Todo, but for later as we can't mark a payment yet }", "'water_outlets': get_outlets(request), 'consumer_groups': get_amount_household(request), 'consumer_individuals': get_total_consumers(request), 'unpaid_bills': 42, # Todo, but for later", "get_zone(request), 'current_period': get_current_month_fr(), 'water_outlets': get_outlets(request), 'consumer_groups': get_amount_household(request), 'consumer_individuals': get_total_consumers(request), 'unpaid_bills': 42, # Todo,", "context = { 'project_version': PROJECT_VERSION, 'project_name': PROJECT_NAME, 'zone_name': get_zone(request), 'current_period': get_current_month_fr(), 'water_outlets': get_outlets(request),", "HttpResponse from django.template import loader from haitiwater.settings import PROJECT_VERSION, PROJECT_NAME from ..utils.get_data import", "'zone_name': get_zone(request), 'current_period': get_current_month_fr(), 'water_outlets': get_outlets(request), 'consumer_groups': get_amount_household(request), 'consumer_individuals': get_total_consumers(request), 'unpaid_bills': 42, #", "..utils.get_data import * @login_required(login_url='/login/') def index(request): template = loader.get_template('consumers.html') context = { 'project_version':", "= loader.get_template('consumers.html') context = { 'project_version': PROJECT_VERSION, 'project_name': PROJECT_NAME, 'zone_name': get_zone(request), 'current_period': get_current_month_fr(),", "get_total_consumers(request), 'unpaid_bills': 42, # Todo, but for later as we can't mark a", "'project_name': PROJECT_NAME, 'zone_name': get_zone(request), 'current_period': get_current_month_fr(), 'water_outlets': get_outlets(request), 'consumer_groups': get_amount_household(request), 'consumer_individuals': get_total_consumers(request), 'unpaid_bills':", "get_outlets(request), 'consumer_groups': get_amount_household(request), 'consumer_individuals': get_total_consumers(request), 'unpaid_bills': 42, # Todo, but for later as", "'consumer_groups': get_amount_household(request), 'consumer_individuals': get_total_consumers(request), 'unpaid_bills': 42, # Todo, but for later as we", "42, # Todo, but for later as we can't mark a payment yet", "* @login_required(login_url='/login/') def index(request): template = loader.get_template('consumers.html') context = { 'project_version': PROJECT_VERSION, 'project_name':", "{ 'project_version': PROJECT_VERSION, 'project_name': PROJECT_NAME, 'zone_name': get_zone(request), 'current_period': get_current_month_fr(), 'water_outlets': get_outlets(request), 'consumer_groups': get_amount_household(request),", "@login_required(login_url='/login/') def index(request): template = loader.get_template('consumers.html') context = { 'project_version': PROJECT_VERSION, 'project_name': PROJECT_NAME,", "index(request): template = loader.get_template('consumers.html') context = { 'project_version': PROJECT_VERSION, 'project_name': PROJECT_NAME, 'zone_name': get_zone(request),", "PROJECT_VERSION, 'project_name': PROJECT_NAME, 'zone_name': get_zone(request), 'current_period': get_current_month_fr(), 'water_outlets': get_outlets(request), 'consumer_groups': get_amount_household(request), 'consumer_individuals': 
get_total_consumers(request),", "def index(request): template = loader.get_template('consumers.html') context = { 'project_version': PROJECT_VERSION, 'project_name': PROJECT_NAME, 'zone_name':", "haitiwater.settings import PROJECT_VERSION, PROJECT_NAME from ..utils.get_data import * @login_required(login_url='/login/') def index(request): template =", "loader from haitiwater.settings import PROJECT_VERSION, PROJECT_NAME from ..utils.get_data import * @login_required(login_url='/login/') def index(request):", "'project_version': PROJECT_VERSION, 'project_name': PROJECT_NAME, 'zone_name': get_zone(request), 'current_period': get_current_month_fr(), 'water_outlets': get_outlets(request), 'consumer_groups': get_amount_household(request), 'consumer_individuals':", "import login_required from django.http import HttpResponse from django.template import loader from haitiwater.settings import", "for later as we can't mark a payment yet } return HttpResponse(template.render(context, request))", "import * @login_required(login_url='/login/') def index(request): template = loader.get_template('consumers.html') context = { 'project_version': PROJECT_VERSION," ]
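The row above shingles a haitiwater Django view for the consumers page: it builds a context dict from helpers in ..utils.get_data and renders consumers.html, with 'unpaid_bills': 42 left as an explicit placeholder per the original Todo comment. A minimal sketch of the same pattern follows; it assumes a configured Django project, the get_* helpers are stand-ins because their real implementations are not visible in the row, and the project_version/project_name settings entries are omitted to keep the sketch importable.

from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.template import loader

# Stand-ins for the helpers the real view imports from ..utils.get_data.
def get_zone(request): return "example zone"
def get_current_month_fr(): return "mois courant"
def get_outlets(request): return 0
def get_amount_household(request): return 0
def get_total_consumers(request): return 0

@login_required(login_url='/login/')
def index(request):
    template = loader.get_template('consumers.html')  # assumes the template exists in the project
    context = {
        'zone_name': get_zone(request),
        'current_period': get_current_month_fr(),
        'water_outlets': get_outlets(request),
        'consumer_groups': get_amount_household(request),
        'consumer_individuals': get_total_consumers(request),
        'unpaid_bills': 42,  # hard-coded placeholder, as in the original
    }
    return HttpResponse(template.render(context, request))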
[ "'current_asset' query = (f'SELECT date FROM {table} ' f'WHERE id=(SELECT MAX(id) FROM {table});')", "{table} ' f'WHERE id=(SELECT MAX(id) FROM {table});') self.db.run_query(query) date_format = '%Y-%m-%d' today =", "TypeError: self.db.insert(table, record, msg=False) else: if today > latest_date: self.db.insert(table, record, msg=False) def", "self.db.insert(table, record, msg=False) else: if today > latest_date: self.db.insert(table, record, msg=False) def __repr__(self)", "pyrich.record import Record class Asset(Record): def __init__(self, table: str) -> None: super().__init__(table) def", "Asset(Record): def __init__(self, table: str) -> None: super().__init__(table) def record_current_asset(self, current_asset: float) ->", "timestamp, 'amount': current_asset, } try: latest_date = self.db.cur.fetchone()[0] except TypeError: self.db.insert(table, record, msg=False)", "current_asset: float) -> None: table = 'current_asset' query = (f'SELECT date FROM {table}", "None: table = 'current_asset' query = (f'SELECT date FROM {table} ' f'WHERE id=(SELECT", "{ 'date': timestamp, 'amount': current_asset, } try: latest_date = self.db.cur.fetchone()[0] except TypeError: self.db.insert(table,", "pandas as pd from pyrich.record import Record class Asset(Record): def __init__(self, table: str)", "= { 'date': timestamp, 'amount': current_asset, } try: latest_date = self.db.cur.fetchone()[0] except TypeError:", "table = 'current_asset' query = (f'SELECT date FROM {table} ' f'WHERE id=(SELECT MAX(id)", "datetime import date import pandas as pd from pyrich.record import Record class Asset(Record):", "today = date.today() timestamp = today.strftime(date_format) record = { 'date': timestamp, 'amount': current_asset,", "import pandas as pd from pyrich.record import Record class Asset(Record): def __init__(self, table:", "= 'current_asset' query = (f'SELECT date FROM {table} ' f'WHERE id=(SELECT MAX(id) FROM", "date import pandas as pd from pyrich.record import Record class Asset(Record): def __init__(self,", "try: latest_date = self.db.cur.fetchone()[0] except TypeError: self.db.insert(table, record, msg=False) else: if today >", "def record_current_asset(self, current_asset: float) -> None: table = 'current_asset' query = (f'SELECT date", "'%Y-%m-%d' today = date.today() timestamp = today.strftime(date_format) record = { 'date': timestamp, 'amount':", "else: if today > latest_date: self.db.insert(table, record, msg=False) def __repr__(self) -> str: return", "pd from pyrich.record import Record class Asset(Record): def __init__(self, table: str) -> None:", "= today.strftime(date_format) record = { 'date': timestamp, 'amount': current_asset, } try: latest_date =", "' f'WHERE id=(SELECT MAX(id) FROM {table});') self.db.run_query(query) date_format = '%Y-%m-%d' today = date.today()", "table: str) -> None: super().__init__(table) def record_current_asset(self, current_asset: float) -> None: table =", "query = (f'SELECT date FROM {table} ' f'WHERE id=(SELECT MAX(id) FROM {table});') self.db.run_query(query)", "-> None: super().__init__(table) def record_current_asset(self, current_asset: float) -> None: table = 'current_asset' query", "msg=False) else: if today > latest_date: self.db.insert(table, record, msg=False) def __repr__(self) -> str:", "import Record class Asset(Record): def __init__(self, table: str) -> None: super().__init__(table) def record_current_asset(self,", "record_current_asset(self, current_asset: float) -> None: table = 'current_asset' query = (f'SELECT date FROM", "import date import pandas 
as pd from pyrich.record import Record class Asset(Record): def", "{table});') self.db.run_query(query) date_format = '%Y-%m-%d' today = date.today() timestamp = today.strftime(date_format) record =", "str) -> None: super().__init__(table) def record_current_asset(self, current_asset: float) -> None: table = 'current_asset'", "date_format = '%Y-%m-%d' today = date.today() timestamp = today.strftime(date_format) record = { 'date':", "latest_date = self.db.cur.fetchone()[0] except TypeError: self.db.insert(table, record, msg=False) else: if today > latest_date:", "} try: latest_date = self.db.cur.fetchone()[0] except TypeError: self.db.insert(table, record, msg=False) else: if today", "FROM {table});') self.db.run_query(query) date_format = '%Y-%m-%d' today = date.today() timestamp = today.strftime(date_format) record", "today.strftime(date_format) record = { 'date': timestamp, 'amount': current_asset, } try: latest_date = self.db.cur.fetchone()[0]", "float) -> None: table = 'current_asset' query = (f'SELECT date FROM {table} '", "MAX(id) FROM {table});') self.db.run_query(query) date_format = '%Y-%m-%d' today = date.today() timestamp = today.strftime(date_format)", "= '%Y-%m-%d' today = date.today() timestamp = today.strftime(date_format) record = { 'date': timestamp,", "id=(SELECT MAX(id) FROM {table});') self.db.run_query(query) date_format = '%Y-%m-%d' today = date.today() timestamp =", "as pd from pyrich.record import Record class Asset(Record): def __init__(self, table: str) ->", "def __init__(self, table: str) -> None: super().__init__(table) def record_current_asset(self, current_asset: float) -> None:", "= (f'SELECT date FROM {table} ' f'WHERE id=(SELECT MAX(id) FROM {table});') self.db.run_query(query) date_format", "self.db.run_query(query) date_format = '%Y-%m-%d' today = date.today() timestamp = today.strftime(date_format) record = {", "__init__(self, table: str) -> None: super().__init__(table) def record_current_asset(self, current_asset: float) -> None: table", "'date': timestamp, 'amount': current_asset, } try: latest_date = self.db.cur.fetchone()[0] except TypeError: self.db.insert(table, record,", "date.today() timestamp = today.strftime(date_format) record = { 'date': timestamp, 'amount': current_asset, } try:", "current_asset, } try: latest_date = self.db.cur.fetchone()[0] except TypeError: self.db.insert(table, record, msg=False) else: if", "timestamp = today.strftime(date_format) record = { 'date': timestamp, 'amount': current_asset, } try: latest_date", "record = { 'date': timestamp, 'amount': current_asset, } try: latest_date = self.db.cur.fetchone()[0] except", "if today > latest_date: self.db.insert(table, record, msg=False) def __repr__(self) -> str: return f\"Asset(table='{self.table}')\"", "(f'SELECT date FROM {table} ' f'WHERE id=(SELECT MAX(id) FROM {table});') self.db.run_query(query) date_format =", "= date.today() timestamp = today.strftime(date_format) record = { 'date': timestamp, 'amount': current_asset, }", "'amount': current_asset, } try: latest_date = self.db.cur.fetchone()[0] except TypeError: self.db.insert(table, record, msg=False) else:", "None: super().__init__(table) def record_current_asset(self, current_asset: float) -> None: table = 'current_asset' query =", "from pyrich.record import Record class Asset(Record): def __init__(self, table: str) -> None: super().__init__(table)", "date FROM {table} ' f'WHERE id=(SELECT MAX(id) FROM {table});') self.db.run_query(query) date_format = '%Y-%m-%d'", "class Asset(Record): def __init__(self, table: 
str) -> None: super().__init__(table) def record_current_asset(self, current_asset: float)", "FROM {table} ' f'WHERE id=(SELECT MAX(id) FROM {table});') self.db.run_query(query) date_format = '%Y-%m-%d' today", "= self.db.cur.fetchone()[0] except TypeError: self.db.insert(table, record, msg=False) else: if today > latest_date: self.db.insert(table,", "self.db.cur.fetchone()[0] except TypeError: self.db.insert(table, record, msg=False) else: if today > latest_date: self.db.insert(table, record,", "f'WHERE id=(SELECT MAX(id) FROM {table});') self.db.run_query(query) date_format = '%Y-%m-%d' today = date.today() timestamp", "-> None: table = 'current_asset' query = (f'SELECT date FROM {table} ' f'WHERE", "except TypeError: self.db.insert(table, record, msg=False) else: if today > latest_date: self.db.insert(table, record, msg=False)", "record, msg=False) else: if today > latest_date: self.db.insert(table, record, msg=False) def __repr__(self) ->", "Record class Asset(Record): def __init__(self, table: str) -> None: super().__init__(table) def record_current_asset(self, current_asset:", "from datetime import date import pandas as pd from pyrich.record import Record class", "super().__init__(table) def record_current_asset(self, current_asset: float) -> None: table = 'current_asset' query = (f'SELECT" ]
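The row above shingles pyrich's Asset.record_current_asset: it reads the date of the most recent row in the current_asset table and inserts today's snapshot only if that date is older (the TypeError branch handles an empty table). The class builds on a project-specific Record/database wrapper that is not shown, so the sketch below reproduces the same once-per-day guard with the standard-library sqlite3 module instead.

import sqlite3
from datetime import date

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE current_asset (id INTEGER PRIMARY KEY, date TEXT, amount REAL)")

def record_current_asset(conn, current_asset):
    today = date.today().strftime("%Y-%m-%d")
    latest = conn.execute(
        "SELECT date FROM current_asset WHERE id=(SELECT MAX(id) FROM current_asset)"
    ).fetchone()
    # Insert only when the table is empty or the newest snapshot is from an earlier day;
    # ISO-formatted date strings compare correctly as plain text.
    if latest is None or today > latest[0]:
        conn.execute("INSERT INTO current_asset (date, amount) VALUES (?, ?)",
                     (today, current_asset))

record_current_asset(conn, 1234.5)
record_current_asset(conn, 9999.0)  # same day: skipped, at most one snapshot per day
print(conn.execute("SELECT date, amount FROM current_asset").fetchall())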
[ "<gh_stars>10-100 from browser import window def wrap(func): # Transforms a function f into", "traceback in case of exception def f(*args, **kw): try: return func(*args, **kw) except", "{0.args[0]}'.format(exc) import sys sys.stderr.write(msg) return f clear_interval = window.clearInterval clear_timeout = window.clearTimeout def", "exc: msg = '{0.info}\\n{0.__name__}: {0.args[0]}'.format(exc) import sys sys.stderr.write(msg) return f clear_interval = window.clearInterval", "def request_animation_frame(func): return int(window.requestAnimationFrame(func)) def cancel_animation_frame(int_id): window.cancelAnimationFrame(int_id) def set_loop_timeout(x): # set a variable", "browser import window def wrap(func): # Transforms a function f into another function", "window.clearTimeout def set_interval(func,interval): return window.setInterval(wrap(func),interval) def set_timeout(func,interval): return int(window.setTimeout(wrap(func),interval)) def request_animation_frame(func): return int(window.requestAnimationFrame(func))", "Exception as exc: msg = '{0.info}\\n{0.__name__}: {0.args[0]}'.format(exc) import sys sys.stderr.write(msg) return f clear_interval", "return int(window.setTimeout(wrap(func),interval)) def request_animation_frame(func): return int(window.requestAnimationFrame(func)) def cancel_animation_frame(int_id): window.cancelAnimationFrame(int_id) def set_loop_timeout(x): # set", "a function f into another function that prints a # traceback in case", "return window.setInterval(wrap(func),interval) def set_timeout(func,interval): return int(window.setTimeout(wrap(func),interval)) def request_animation_frame(func): return int(window.requestAnimationFrame(func)) def cancel_animation_frame(int_id): window.cancelAnimationFrame(int_id)", "set a variable used to stop loops that last more than x seconds", "import sys sys.stderr.write(msg) return f clear_interval = window.clearInterval clear_timeout = window.clearTimeout def set_interval(func,interval):", "as exc: msg = '{0.info}\\n{0.__name__}: {0.args[0]}'.format(exc) import sys sys.stderr.write(msg) return f clear_interval =", "used to stop loops that last more than x seconds assert isinstance(x, int)", "of exception def f(*args, **kw): try: return func(*args, **kw) except Exception as exc:", "into another function that prints a # traceback in case of exception def", "that prints a # traceback in case of exception def f(*args, **kw): try:", "# traceback in case of exception def f(*args, **kw): try: return func(*args, **kw)", "function that prints a # traceback in case of exception def f(*args, **kw):", "try: return func(*args, **kw) except Exception as exc: msg = '{0.info}\\n{0.__name__}: {0.args[0]}'.format(exc) import", "sys sys.stderr.write(msg) return f clear_interval = window.clearInterval clear_timeout = window.clearTimeout def set_interval(func,interval): return", "= window.clearInterval clear_timeout = window.clearTimeout def set_interval(func,interval): return window.setInterval(wrap(func),interval) def set_timeout(func,interval): return int(window.setTimeout(wrap(func),interval))", "set_timeout(func,interval): return int(window.setTimeout(wrap(func),interval)) def request_animation_frame(func): return int(window.requestAnimationFrame(func)) def cancel_animation_frame(int_id): window.cancelAnimationFrame(int_id) def set_loop_timeout(x): #", "variable used to stop loops that last more than x seconds assert isinstance(x,", "exception def f(*args, **kw): try: return func(*args, **kw) except Exception as exc: msg", "clear_interval = 
window.clearInterval clear_timeout = window.clearTimeout def set_interval(func,interval): return window.setInterval(wrap(func),interval) def set_timeout(func,interval): return", "loops that last more than x seconds assert isinstance(x, int) __BRYTHON__.loop_timeout = x", "window.cancelAnimationFrame(int_id) def set_loop_timeout(x): # set a variable used to stop loops that last", "wrap(func): # Transforms a function f into another function that prints a #", "'{0.info}\\n{0.__name__}: {0.args[0]}'.format(exc) import sys sys.stderr.write(msg) return f clear_interval = window.clearInterval clear_timeout = window.clearTimeout", "clear_timeout = window.clearTimeout def set_interval(func,interval): return window.setInterval(wrap(func),interval) def set_timeout(func,interval): return int(window.setTimeout(wrap(func),interval)) def request_animation_frame(func):", "function f into another function that prints a # traceback in case of", "case of exception def f(*args, **kw): try: return func(*args, **kw) except Exception as", "msg = '{0.info}\\n{0.__name__}: {0.args[0]}'.format(exc) import sys sys.stderr.write(msg) return f clear_interval = window.clearInterval clear_timeout", "a variable used to stop loops that last more than x seconds assert", "func(*args, **kw) except Exception as exc: msg = '{0.info}\\n{0.__name__}: {0.args[0]}'.format(exc) import sys sys.stderr.write(msg)", "except Exception as exc: msg = '{0.info}\\n{0.__name__}: {0.args[0]}'.format(exc) import sys sys.stderr.write(msg) return f", "from browser import window def wrap(func): # Transforms a function f into another", "a # traceback in case of exception def f(*args, **kw): try: return func(*args,", "window def wrap(func): # Transforms a function f into another function that prints", "return func(*args, **kw) except Exception as exc: msg = '{0.info}\\n{0.__name__}: {0.args[0]}'.format(exc) import sys", "def set_interval(func,interval): return window.setInterval(wrap(func),interval) def set_timeout(func,interval): return int(window.setTimeout(wrap(func),interval)) def request_animation_frame(func): return int(window.requestAnimationFrame(func)) def", "window.setInterval(wrap(func),interval) def set_timeout(func,interval): return int(window.setTimeout(wrap(func),interval)) def request_animation_frame(func): return int(window.requestAnimationFrame(func)) def cancel_animation_frame(int_id): window.cancelAnimationFrame(int_id) def", "sys.stderr.write(msg) return f clear_interval = window.clearInterval clear_timeout = window.clearTimeout def set_interval(func,interval): return window.setInterval(wrap(func),interval)", "return int(window.requestAnimationFrame(func)) def cancel_animation_frame(int_id): window.cancelAnimationFrame(int_id) def set_loop_timeout(x): # set a variable used to", "int(window.requestAnimationFrame(func)) def cancel_animation_frame(int_id): window.cancelAnimationFrame(int_id) def set_loop_timeout(x): # set a variable used to stop", "def f(*args, **kw): try: return func(*args, **kw) except Exception as exc: msg =", "def set_timeout(func,interval): return int(window.setTimeout(wrap(func),interval)) def request_animation_frame(func): return int(window.requestAnimationFrame(func)) def cancel_animation_frame(int_id): window.cancelAnimationFrame(int_id) def set_loop_timeout(x):", "f clear_interval = window.clearInterval clear_timeout = window.clearTimeout def set_interval(func,interval): return window.setInterval(wrap(func),interval) def set_timeout(func,interval):", "prints a # traceback in case of exception def f(*args, 
**kw): try: return", "set_loop_timeout(x): # set a variable used to stop loops that last more than", "to stop loops that last more than x seconds assert isinstance(x, int) __BRYTHON__.loop_timeout", "= window.clearTimeout def set_interval(func,interval): return window.setInterval(wrap(func),interval) def set_timeout(func,interval): return int(window.setTimeout(wrap(func),interval)) def request_animation_frame(func): return", "set_interval(func,interval): return window.setInterval(wrap(func),interval) def set_timeout(func,interval): return int(window.setTimeout(wrap(func),interval)) def request_animation_frame(func): return int(window.requestAnimationFrame(func)) def cancel_animation_frame(int_id):", "# set a variable used to stop loops that last more than x", "# Transforms a function f into another function that prints a # traceback", "another function that prints a # traceback in case of exception def f(*args,", "def cancel_animation_frame(int_id): window.cancelAnimationFrame(int_id) def set_loop_timeout(x): # set a variable used to stop loops", "f(*args, **kw): try: return func(*args, **kw) except Exception as exc: msg = '{0.info}\\n{0.__name__}:", "def wrap(func): # Transforms a function f into another function that prints a", "f into another function that prints a # traceback in case of exception", "int(window.setTimeout(wrap(func),interval)) def request_animation_frame(func): return int(window.requestAnimationFrame(func)) def cancel_animation_frame(int_id): window.cancelAnimationFrame(int_id) def set_loop_timeout(x): # set a", "= '{0.info}\\n{0.__name__}: {0.args[0]}'.format(exc) import sys sys.stderr.write(msg) return f clear_interval = window.clearInterval clear_timeout =", "request_animation_frame(func): return int(window.requestAnimationFrame(func)) def cancel_animation_frame(int_id): window.cancelAnimationFrame(int_id) def set_loop_timeout(x): # set a variable used", "def set_loop_timeout(x): # set a variable used to stop loops that last more", "in case of exception def f(*args, **kw): try: return func(*args, **kw) except Exception", "cancel_animation_frame(int_id): window.cancelAnimationFrame(int_id) def set_loop_timeout(x): # set a variable used to stop loops that", "stop loops that last more than x seconds assert isinstance(x, int) __BRYTHON__.loop_timeout =", "window.clearInterval clear_timeout = window.clearTimeout def set_interval(func,interval): return window.setInterval(wrap(func),interval) def set_timeout(func,interval): return int(window.setTimeout(wrap(func),interval)) def", "**kw) except Exception as exc: msg = '{0.info}\\n{0.__name__}: {0.args[0]}'.format(exc) import sys sys.stderr.write(msg) return", "import window def wrap(func): # Transforms a function f into another function that", "**kw): try: return func(*args, **kw) except Exception as exc: msg = '{0.info}\\n{0.__name__}: {0.args[0]}'.format(exc)", "Transforms a function f into another function that prints a # traceback in", "return f clear_interval = window.clearInterval clear_timeout = window.clearTimeout def set_interval(func,interval): return window.setInterval(wrap(func),interval) def" ]
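The wrap() helper above exists so that a callback handed to the browser's scheduler reports its traceback on stderr instead of disappearing inside the JavaScript event loop. Here is a minimal plain-Python sketch of that same idea; fake_set_timeout and tick are hypothetical stand-ins for window.setTimeout and a real callback, since the module itself only runs under Brython in a browser.

import sys
import traceback

def wrap_cb(func):
    # Return a callback that reports exceptions instead of raising them
    # into the (hypothetical) host event loop.
    def safe(*args, **kw):
        try:
            return func(*args, **kw)
        except Exception:
            traceback.print_exc(file=sys.stderr)
    return safe

def fake_set_timeout(callback, interval_ms):
    # Hypothetical stand-in for window.setTimeout: fires immediately.
    callback()

def tick():
    raise ValueError("boom")   # the traceback is printed, not swallowed

fake_set_timeout(wrap_cb(tick), 100)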
[ "if is_more: max_version = file_version max_key = filename if not max_key: logger.info(\"No new", "filename) try: out_dirname = os.path.dirname(out_filename) os.makedirs(out_dirname) except: pass target = file(out_filename, \"wb\") with", "\"wb\") with source, target: shutil.copyfileobj(source, target) zip_file.close() kill_python_processes() log_event(\"upgrade_daemon_complete\", \"Done Upgrading Serverdaemon. All", "max_version[0]: is_more = True elif file_version[1] > max_version[1]: is_more = True elif file_version[2]", "'r') for member in zip_file.namelist(): # copy file (taken from zipfile's extract) filename", "filename: lst = filename.split(\"-\")[-1].split(\".\") try: file_version = [int(p) for p in lst[0:-1]] except", "= os.path.dirname(out_filename) os.makedirs(out_dirname) except: pass target = file(out_filename, \"wb\") with source, target: shutil.copyfileobj(source,", "\"/f\"] subprocess.check_call(command, shell=True) except Exception as e: logger.error('Could not kill task. Error =", "stderr = popen.communicate() lst = stdout.split(\"\\n\") for l in lst: ll = l.split()", "True if is_more: max_version = file_version max_key = filename if not max_key: logger.info(\"No", "= l.split() if not len(ll): continue name = ll[0] try: pid = int(ll[1])", "max_key, out_filename) return out_filename if __name__ == \"__main__\": setup_logging(\"updatedaemon\") filename = check_download() if", "with source, target: shutil.copyfileobj(source, target) zip_file.close() kill_python_processes() log_event(\"upgrade_daemon_complete\", \"Done Upgrading Serverdaemon. All python", "files = client.list_objects(Bucket=BUCKET_NAME, Prefix=UE4_BUILDS_FOLDER)['Contents'] max_version = get_my_version() my_version = max_version logger.info(\"My version is", "p in lst[0:-1]] except ValueError: continue is_more = False if file_version[0] > max_version[0]:", "transfer = S3Transfer(client) out_filename = \"c:\\\\temp\\\\drift-serverdaemon.zip\" transfer.download_file(BUCKET_NAME, max_key, out_filename) return out_filename if __name__", "in zip_file.namelist(): # copy file (taken from zipfile's extract) filename = \"/\".join(member.split(\"/\")[1:]) source", "out_filename if __name__ == \"__main__\": setup_logging(\"updatedaemon\") filename = check_download() if not filename: sys.exit(0)", "0] try: with open(\"VERSION\") as f: version = f.read().strip() # increment version each", "zip_file.close() kill_python_processes() log_event(\"upgrade_daemon_complete\", \"Done Upgrading Serverdaemon. All python processes have been killed\", severity=\"WARNING\")", "False if file_version[0] > max_version[0]: is_more = True elif file_version[1] > max_version[1]: is_more", "e: logger.error('Could not kill task. 
Error = %s' % e) def check_download(): client", "target = file(out_filename, \"wb\") with source, target: shutil.copyfileobj(source, target) zip_file.close() kill_python_processes() log_event(\"upgrade_daemon_complete\", \"Done", "= file(out_filename, \"wb\") with source, target: shutil.copyfileobj(source, target) zip_file.close() kill_python_processes() log_event(\"upgrade_daemon_complete\", \"Done Upgrading", "\"drift-serverdaemon-\" in filename: lst = filename.split(\"-\")[-1].split(\".\") try: file_version = [int(p) for p in", "\"python.exe\" in l: try: logger.info(\"Killing task '%s' with pid %s...\" % (name, pid))", "max_version[2]: is_more = True if is_more: max_version = file_version max_key = filename if", "zip_file.open(member) out_filename = os.path.join(INSTALL_FOLDER, filename) try: out_dirname = os.path.dirname(out_filename) os.makedirs(out_dirname) except: pass target", "REGION) files = client.list_objects(Bucket=BUCKET_NAME, Prefix=UE4_BUILDS_FOLDER)['Contents'] max_version = get_my_version() my_version = max_version logger.info(\"My version", "except: logger.warning(\"Old version invalid\") return t def kill_python_processes(): command = [\"tasklist\"] popen =", "stdout.split(\"\\n\") for l in lst: ll = l.split() if not len(ll): continue name", "[\"taskkill\", \"/PID\", str(pid), \"/f\"] subprocess.check_call(command, shell=True) except Exception as e: logger.error('Could not kill", "\"directive-tiers.dg-api.com\" UE4_BUILDS_FOLDER = \"ue4-builds\" INSTALL_FOLDER = r\"c:\\drift-serverdaemon\" def get_my_version(): t = [0, 0,", "version %s to %s\" % (my_version, max_version), severity=\"WARNING\") logger.info(\"found version %s, %s\", max_version,", "my_version = max_version logger.info(\"My version is %s\", \".\".join(str(p) for p in max_version)) max_key", "if __name__ == \"__main__\": setup_logging(\"updatedaemon\") filename = check_download() if not filename: sys.exit(0) zip_file", "logger, log_event import boto3 from boto3.s3.transfer import S3Transfer, TransferConfig REGION = \"eu-west-1\" BUCKET_NAME", "max_version logger.info(\"My version is %s\", \".\".join(str(p) for p in max_version)) max_key = None", "os.path.dirname(out_filename) os.makedirs(out_dirname) except: pass target = file(out_filename, \"wb\") with source, target: shutil.copyfileobj(source, target)", "filename = check_download() if not filename: sys.exit(0) zip_file = zipfile.ZipFile(filename, 'r') for member", "= \"c:\\\\temp\\\\drift-serverdaemon.zip\" transfer.download_file(BUCKET_NAME, max_key, out_filename) return out_filename if __name__ == \"__main__\": setup_logging(\"updatedaemon\") filename", "None for s3_key in files: filename = s3_key['Key'] if \"drift-serverdaemon-\" in filename: lst", "for p in version.split(\".\")] except: logger.warning(\"Old version invalid\") return t def kill_python_processes(): command", "True elif file_version[1] > max_version[1]: is_more = True elif file_version[2] > max_version[2]: is_more", "pid = int(ll[1]) except: continue if pid == os.getpid(): continue if \"python.exe\" in", "try: out_dirname = os.path.dirname(out_filename) os.makedirs(out_dirname) except: pass target = file(out_filename, \"wb\") with source,", "= \"directive-tiers.dg-api.com\" UE4_BUILDS_FOLDER = \"ue4-builds\" INSTALL_FOLDER = r\"c:\\drift-serverdaemon\" def get_my_version(): t = [0,", "int(ll[1]) except: continue if pid == os.getpid(): continue if \"python.exe\" in l: try:", "shell=True) except Exception as e: logger.error('Could not kill task. 
Error = %s' %", "= zip_file.open(member) out_filename = os.path.join(INSTALL_FOLDER, filename) try: out_dirname = os.path.dirname(out_filename) os.makedirs(out_dirname) except: pass", "= max_version logger.info(\"My version is %s\", \".\".join(str(p) for p in max_version)) max_key =", "%s\", \".\".join(str(p) for p in max_version)) max_key = None for s3_key in files:", "def check_download(): client = boto3.client('s3', REGION) files = client.list_objects(Bucket=BUCKET_NAME, Prefix=UE4_BUILDS_FOLDER)['Contents'] max_version = get_my_version()", "if \"drift-serverdaemon-\" in filename: lst = filename.split(\"-\")[-1].split(\".\") try: file_version = [int(p) for p", "f.read().strip() # increment version each time the script is called t = [int(p)", "filename = \"/\".join(member.split(\"/\")[1:]) source = zip_file.open(member) out_filename = os.path.join(INSTALL_FOLDER, filename) try: out_dirname =", "version each time the script is called t = [int(p) for p in", "max_version = get_my_version() my_version = max_version logger.info(\"My version is %s\", \".\".join(str(p) for p", "True elif file_version[2] > max_version[2]: is_more = True if is_more: max_version = file_version", "Exception as e: logger.error('Could not kill task. Error = %s' % e) def", "REGION = \"eu-west-1\" BUCKET_NAME = \"directive-tiers.dg-api.com\" UE4_BUILDS_FOLDER = \"ue4-builds\" INSTALL_FOLDER = r\"c:\\drift-serverdaemon\" def", "severity=\"WARNING\") logger.info(\"found version %s, %s\", max_version, max_key) transfer = S3Transfer(client) out_filename = \"c:\\\\temp\\\\drift-serverdaemon.zip\"", "def get_my_version(): t = [0, 0, 0] try: with open(\"VERSION\") as f: version", "= True elif file_version[2] > max_version[2]: is_more = True if is_more: max_version =", "out_filename) return out_filename if __name__ == \"__main__\": setup_logging(\"updatedaemon\") filename = check_download() if not", "\"__main__\": setup_logging(\"updatedaemon\") filename = check_download() if not filename: sys.exit(0) zip_file = zipfile.ZipFile(filename, 'r')", "r\"c:\\drift-serverdaemon\" def get_my_version(): t = [0, 0, 0] try: with open(\"VERSION\") as f:", "= %s' % e) def check_download(): client = boto3.client('s3', REGION) files = client.list_objects(Bucket=BUCKET_NAME,", "> max_version[1]: is_more = True elif file_version[2] > max_version[2]: is_more = True if", "= [int(p) for p in lst[0:-1]] except ValueError: continue is_more = False if", "def kill_python_processes(): command = [\"tasklist\"] popen = subprocess.Popen(command, stdout=subprocess.PIPE) stdout, stderr = popen.communicate()", "elif file_version[1] > max_version[1]: is_more = True elif file_version[2] > max_version[2]: is_more =", "out_filename = \"c:\\\\temp\\\\drift-serverdaemon.zip\" transfer.download_file(BUCKET_NAME, max_key, out_filename) return out_filename if __name__ == \"__main__\": setup_logging(\"updatedaemon\")", "is_more: max_version = file_version max_key = filename if not max_key: logger.info(\"No new version", "command = [\"taskkill\", \"/PID\", str(pid), \"/f\"] subprocess.check_call(command, shell=True) except Exception as e: logger.error('Could", "try: logger.info(\"Killing task '%s' with pid %s...\" % (name, pid)) command = [\"taskkill\",", "if pid == os.getpid(): continue if \"python.exe\" in l: try: logger.info(\"Killing task '%s'", "% (my_version, max_version), severity=\"WARNING\") logger.info(\"found version %s, %s\", max_version, max_key) transfer = S3Transfer(client)", "sys, shutil import zipfile, subprocess from serverdaemon.logsetup import 
setup_logging, logger, log_event import boto3", "%s\" % (my_version, max_version), severity=\"WARNING\") logger.info(\"found version %s, %s\", max_version, max_key) transfer =", "for p in max_version)) max_key = None for s3_key in files: filename =", "lst: ll = l.split() if not len(ll): continue name = ll[0] try: pid", "subprocess.Popen(command, stdout=subprocess.PIPE) stdout, stderr = popen.communicate() lst = stdout.split(\"\\n\") for l in lst:", "version found. Bailing out.\") return None log_event(\"upgrade_daemon\", \"Upgrading Serverdaemon from version %s to", "\".\".join(str(p) for p in max_version)) max_key = None for s3_key in files: filename", "max_key) transfer = S3Transfer(client) out_filename = \"c:\\\\temp\\\\drift-serverdaemon.zip\" transfer.download_file(BUCKET_NAME, max_key, out_filename) return out_filename if", "file_version max_key = filename if not max_key: logger.info(\"No new version found. Bailing out.\")", "pid)) command = [\"taskkill\", \"/PID\", str(pid), \"/f\"] subprocess.check_call(command, shell=True) except Exception as e:", "target) zip_file.close() kill_python_processes() log_event(\"upgrade_daemon_complete\", \"Done Upgrading Serverdaemon. All python processes have been killed\",", "import setup_logging, logger, log_event import boto3 from boto3.s3.transfer import S3Transfer, TransferConfig REGION =", "import boto3 from boto3.s3.transfer import S3Transfer, TransferConfig REGION = \"eu-west-1\" BUCKET_NAME = \"directive-tiers.dg-api.com\"", "import os, sys, shutil import zipfile, subprocess from serverdaemon.logsetup import setup_logging, logger, log_event", "not max_key: logger.info(\"No new version found. Bailing out.\") return None log_event(\"upgrade_daemon\", \"Upgrading Serverdaemon", "\"Upgrading Serverdaemon from version %s to %s\" % (my_version, max_version), severity=\"WARNING\") logger.info(\"found version", "== \"__main__\": setup_logging(\"updatedaemon\") filename = check_download() if not filename: sys.exit(0) zip_file = zipfile.ZipFile(filename,", "ValueError: continue is_more = False if file_version[0] > max_version[0]: is_more = True elif", "boto3.client('s3', REGION) files = client.list_objects(Bucket=BUCKET_NAME, Prefix=UE4_BUILDS_FOLDER)['Contents'] max_version = get_my_version() my_version = max_version logger.info(\"My", "(taken from zipfile's extract) filename = \"/\".join(member.split(\"/\")[1:]) source = zip_file.open(member) out_filename = os.path.join(INSTALL_FOLDER,", "= ll[0] try: pid = int(ll[1]) except: continue if pid == os.getpid(): continue", "= client.list_objects(Bucket=BUCKET_NAME, Prefix=UE4_BUILDS_FOLDER)['Contents'] max_version = get_my_version() my_version = max_version logger.info(\"My version is %s\",", "l.split() if not len(ll): continue name = ll[0] try: pid = int(ll[1]) except:", "is_more = False if file_version[0] > max_version[0]: is_more = True elif file_version[1] >", "return t def kill_python_processes(): command = [\"tasklist\"] popen = subprocess.Popen(command, stdout=subprocess.PIPE) stdout, stderr", "in lst: ll = l.split() if not len(ll): continue name = ll[0] try:", "source = zip_file.open(member) out_filename = os.path.join(INSTALL_FOLDER, filename) try: out_dirname = os.path.dirname(out_filename) os.makedirs(out_dirname) except:", "ll = l.split() if not len(ll): continue name = ll[0] try: pid =", "pid %s...\" % (name, pid)) command = [\"taskkill\", \"/PID\", str(pid), \"/f\"] subprocess.check_call(command, shell=True)", "% e) def check_download(): client = boto3.client('s3', REGION) files = 
client.list_objects(Bucket=BUCKET_NAME, Prefix=UE4_BUILDS_FOLDER)['Contents'] max_version", "get_my_version(): t = [0, 0, 0] try: with open(\"VERSION\") as f: version =", "with pid %s...\" % (name, pid)) command = [\"taskkill\", \"/PID\", str(pid), \"/f\"] subprocess.check_call(command,", "is_more = True elif file_version[1] > max_version[1]: is_more = True elif file_version[2] >", "= check_download() if not filename: sys.exit(0) zip_file = zipfile.ZipFile(filename, 'r') for member in", "the script is called t = [int(p) for p in version.split(\".\")] except: logger.warning(\"Old", "max_version)) max_key = None for s3_key in files: filename = s3_key['Key'] if \"drift-serverdaemon-\"", "is %s\", \".\".join(str(p) for p in max_version)) max_key = None for s3_key in", "script is called t = [int(p) for p in version.split(\".\")] except: logger.warning(\"Old version", "S3Transfer(client) out_filename = \"c:\\\\temp\\\\drift-serverdaemon.zip\" transfer.download_file(BUCKET_NAME, max_key, out_filename) return out_filename if __name__ == \"__main__\":", "pass target = file(out_filename, \"wb\") with source, target: shutil.copyfileobj(source, target) zip_file.close() kill_python_processes() log_event(\"upgrade_daemon_complete\",", "version is %s\", \".\".join(str(p) for p in max_version)) max_key = None for s3_key", "in lst[0:-1]] except ValueError: continue is_more = False if file_version[0] > max_version[0]: is_more", "os.getpid(): continue if \"python.exe\" in l: try: logger.info(\"Killing task '%s' with pid %s...\"", "to %s\" % (my_version, max_version), severity=\"WARNING\") logger.info(\"found version %s, %s\", max_version, max_key) transfer", "= [\"taskkill\", \"/PID\", str(pid), \"/f\"] subprocess.check_call(command, shell=True) except Exception as e: logger.error('Could not", "len(ll): continue name = ll[0] try: pid = int(ll[1]) except: continue if pid", "found. 
Bailing out.\") return None log_event(\"upgrade_daemon\", \"Upgrading Serverdaemon from version %s to %s\"", "f: version = f.read().strip() # increment version each time the script is called", "None log_event(\"upgrade_daemon\", \"Upgrading Serverdaemon from version %s to %s\" % (my_version, max_version), severity=\"WARNING\")", "= \"eu-west-1\" BUCKET_NAME = \"directive-tiers.dg-api.com\" UE4_BUILDS_FOLDER = \"ue4-builds\" INSTALL_FOLDER = r\"c:\\drift-serverdaemon\" def get_my_version():", "out_dirname = os.path.dirname(out_filename) os.makedirs(out_dirname) except: pass target = file(out_filename, \"wb\") with source, target:", "check_download(): client = boto3.client('s3', REGION) files = client.list_objects(Bucket=BUCKET_NAME, Prefix=UE4_BUILDS_FOLDER)['Contents'] max_version = get_my_version() my_version", "task '%s' with pid %s...\" % (name, pid)) command = [\"taskkill\", \"/PID\", str(pid),", "os.path.join(INSTALL_FOLDER, filename) try: out_dirname = os.path.dirname(out_filename) os.makedirs(out_dirname) except: pass target = file(out_filename, \"wb\")", "boto3 from boto3.s3.transfer import S3Transfer, TransferConfig REGION = \"eu-west-1\" BUCKET_NAME = \"directive-tiers.dg-api.com\" UE4_BUILDS_FOLDER", "member in zip_file.namelist(): # copy file (taken from zipfile's extract) filename = \"/\".join(member.split(\"/\")[1:])", "from zipfile's extract) filename = \"/\".join(member.split(\"/\")[1:]) source = zip_file.open(member) out_filename = os.path.join(INSTALL_FOLDER, filename)", "version.split(\".\")] except: logger.warning(\"Old version invalid\") return t def kill_python_processes(): command = [\"tasklist\"] popen", "get_my_version() my_version = max_version logger.info(\"My version is %s\", \".\".join(str(p) for p in max_version))", "new version found. Bailing out.\") return None log_event(\"upgrade_daemon\", \"Upgrading Serverdaemon from version %s", "[\"tasklist\"] popen = subprocess.Popen(command, stdout=subprocess.PIPE) stdout, stderr = popen.communicate() lst = stdout.split(\"\\n\") for", "continue name = ll[0] try: pid = int(ll[1]) except: continue if pid ==", "not kill task. 
Error = %s' % e) def check_download(): client = boto3.client('s3',", "filename = s3_key['Key'] if \"drift-serverdaemon-\" in filename: lst = filename.split(\"-\")[-1].split(\".\") try: file_version =", "= get_my_version() my_version = max_version logger.info(\"My version is %s\", \".\".join(str(p) for p in", "in files: filename = s3_key['Key'] if \"drift-serverdaemon-\" in filename: lst = filename.split(\"-\")[-1].split(\".\") try:", "= s3_key['Key'] if \"drift-serverdaemon-\" in filename: lst = filename.split(\"-\")[-1].split(\".\") try: file_version = [int(p)", "os.makedirs(out_dirname) except: pass target = file(out_filename, \"wb\") with source, target: shutil.copyfileobj(source, target) zip_file.close()", "logger.info(\"Killing task '%s' with pid %s...\" % (name, pid)) command = [\"taskkill\", \"/PID\",", "command = [\"tasklist\"] popen = subprocess.Popen(command, stdout=subprocess.PIPE) stdout, stderr = popen.communicate() lst =", "check_download() if not filename: sys.exit(0) zip_file = zipfile.ZipFile(filename, 'r') for member in zip_file.namelist():", "time the script is called t = [int(p) for p in version.split(\".\")] except:", "elif file_version[2] > max_version[2]: is_more = True if is_more: max_version = file_version max_key", "Serverdaemon from version %s to %s\" % (my_version, max_version), severity=\"WARNING\") logger.info(\"found version %s,", "filename: sys.exit(0) zip_file = zipfile.ZipFile(filename, 'r') for member in zip_file.namelist(): # copy file", "shutil.copyfileobj(source, target) zip_file.close() kill_python_processes() log_event(\"upgrade_daemon_complete\", \"Done Upgrading Serverdaemon. All python processes have been", "extract) filename = \"/\".join(member.split(\"/\")[1:]) source = zip_file.open(member) out_filename = os.path.join(INSTALL_FOLDER, filename) try: out_dirname", "try: file_version = [int(p) for p in lst[0:-1]] except ValueError: continue is_more =", "if not max_key: logger.info(\"No new version found. 
Bailing out.\") return None log_event(\"upgrade_daemon\", \"Upgrading", "subprocess from serverdaemon.logsetup import setup_logging, logger, log_event import boto3 from boto3.s3.transfer import S3Transfer,", "logger.info(\"My version is %s\", \".\".join(str(p) for p in max_version)) max_key = None for", "zipfile's extract) filename = \"/\".join(member.split(\"/\")[1:]) source = zip_file.open(member) out_filename = os.path.join(INSTALL_FOLDER, filename) try:", "is called t = [int(p) for p in version.split(\".\")] except: logger.warning(\"Old version invalid\")", "not filename: sys.exit(0) zip_file = zipfile.ZipFile(filename, 'r') for member in zip_file.namelist(): # copy", "= filename.split(\"-\")[-1].split(\".\") try: file_version = [int(p) for p in lst[0:-1]] except ValueError: continue", "\"/PID\", str(pid), \"/f\"] subprocess.check_call(command, shell=True) except Exception as e: logger.error('Could not kill task.", "return out_filename if __name__ == \"__main__\": setup_logging(\"updatedaemon\") filename = check_download() if not filename:", "\"/\".join(member.split(\"/\")[1:]) source = zip_file.open(member) out_filename = os.path.join(INSTALL_FOLDER, filename) try: out_dirname = os.path.dirname(out_filename) os.makedirs(out_dirname)", "if file_version[0] > max_version[0]: is_more = True elif file_version[1] > max_version[1]: is_more =", "l: try: logger.info(\"Killing task '%s' with pid %s...\" % (name, pid)) command =", "t def kill_python_processes(): command = [\"tasklist\"] popen = subprocess.Popen(command, stdout=subprocess.PIPE) stdout, stderr =", "as e: logger.error('Could not kill task. Error = %s' % e) def check_download():", "= [\"tasklist\"] popen = subprocess.Popen(command, stdout=subprocess.PIPE) stdout, stderr = popen.communicate() lst = stdout.split(\"\\n\")", "name = ll[0] try: pid = int(ll[1]) except: continue if pid == os.getpid():", "= subprocess.Popen(command, stdout=subprocess.PIPE) stdout, stderr = popen.communicate() lst = stdout.split(\"\\n\") for l in", "Error = %s' % e) def check_download(): client = boto3.client('s3', REGION) files =", "[0, 0, 0] try: with open(\"VERSION\") as f: version = f.read().strip() # increment", "in filename: lst = filename.split(\"-\")[-1].split(\".\") try: file_version = [int(p) for p in lst[0:-1]]", "(my_version, max_version), severity=\"WARNING\") logger.info(\"found version %s, %s\", max_version, max_key) transfer = S3Transfer(client) out_filename", "filename.split(\"-\")[-1].split(\".\") try: file_version = [int(p) for p in lst[0:-1]] except ValueError: continue is_more", "0, 0] try: with open(\"VERSION\") as f: version = f.read().strip() # increment version", "== os.getpid(): continue if \"python.exe\" in l: try: logger.info(\"Killing task '%s' with pid", "kill task. Error = %s' % e) def check_download(): client = boto3.client('s3', REGION)", "max_key: logger.info(\"No new version found. 
Bailing out.\") return None log_event(\"upgrade_daemon\", \"Upgrading Serverdaemon from", "serverdaemon.logsetup import setup_logging, logger, log_event import boto3 from boto3.s3.transfer import S3Transfer, TransferConfig REGION", "= [int(p) for p in version.split(\".\")] except: logger.warning(\"Old version invalid\") return t def", "[int(p) for p in lst[0:-1]] except ValueError: continue is_more = False if file_version[0]", "UE4_BUILDS_FOLDER = \"ue4-builds\" INSTALL_FOLDER = r\"c:\\drift-serverdaemon\" def get_my_version(): t = [0, 0, 0]", "%s' % e) def check_download(): client = boto3.client('s3', REGION) files = client.list_objects(Bucket=BUCKET_NAME, Prefix=UE4_BUILDS_FOLDER)['Contents']", "invalid\") return t def kill_python_processes(): command = [\"tasklist\"] popen = subprocess.Popen(command, stdout=subprocess.PIPE) stdout,", "max_version, max_key) transfer = S3Transfer(client) out_filename = \"c:\\\\temp\\\\drift-serverdaemon.zip\" transfer.download_file(BUCKET_NAME, max_key, out_filename) return out_filename", "= boto3.client('s3', REGION) files = client.list_objects(Bucket=BUCKET_NAME, Prefix=UE4_BUILDS_FOLDER)['Contents'] max_version = get_my_version() my_version = max_version", "max_version[1]: is_more = True elif file_version[2] > max_version[2]: is_more = True if is_more:", "subprocess.check_call(command, shell=True) except Exception as e: logger.error('Could not kill task. Error = %s'", "ll[0] try: pid = int(ll[1]) except: continue if pid == os.getpid(): continue if", "if \"python.exe\" in l: try: logger.info(\"Killing task '%s' with pid %s...\" % (name,", "stdout=subprocess.PIPE) stdout, stderr = popen.communicate() lst = stdout.split(\"\\n\") for l in lst: ll", "version invalid\") return t def kill_python_processes(): command = [\"tasklist\"] popen = subprocess.Popen(command, stdout=subprocess.PIPE)", "transfer.download_file(BUCKET_NAME, max_key, out_filename) return out_filename if __name__ == \"__main__\": setup_logging(\"updatedaemon\") filename = check_download()", "return None log_event(\"upgrade_daemon\", \"Upgrading Serverdaemon from version %s to %s\" % (my_version, max_version),", "if not len(ll): continue name = ll[0] try: pid = int(ll[1]) except: continue", "max_version), severity=\"WARNING\") logger.info(\"found version %s, %s\", max_version, max_key) transfer = S3Transfer(client) out_filename =", "zip_file = zipfile.ZipFile(filename, 'r') for member in zip_file.namelist(): # copy file (taken from", "continue is_more = False if file_version[0] > max_version[0]: is_more = True elif file_version[1]", "= os.path.join(INSTALL_FOLDER, filename) try: out_dirname = os.path.dirname(out_filename) os.makedirs(out_dirname) except: pass target = file(out_filename,", "= \"ue4-builds\" INSTALL_FOLDER = r\"c:\\drift-serverdaemon\" def get_my_version(): t = [0, 0, 0] try:", "for s3_key in files: filename = s3_key['Key'] if \"drift-serverdaemon-\" in filename: lst =", "popen = subprocess.Popen(command, stdout=subprocess.PIPE) stdout, stderr = popen.communicate() lst = stdout.split(\"\\n\") for l", "= popen.communicate() lst = stdout.split(\"\\n\") for l in lst: ll = l.split() if", "p in version.split(\".\")] except: logger.warning(\"Old version invalid\") return t def kill_python_processes(): command =", "out_filename = os.path.join(INSTALL_FOLDER, filename) try: out_dirname = os.path.dirname(out_filename) os.makedirs(out_dirname) except: pass target =", "log_event(\"upgrade_daemon\", \"Upgrading Serverdaemon from version %s to %s\" % (my_version, max_version), 
severity=\"WARNING\") logger.info(\"found", "boto3.s3.transfer import S3Transfer, TransferConfig REGION = \"eu-west-1\" BUCKET_NAME = \"directive-tiers.dg-api.com\" UE4_BUILDS_FOLDER = \"ue4-builds\"", "t = [int(p) for p in version.split(\".\")] except: logger.warning(\"Old version invalid\") return t", "s3_key in files: filename = s3_key['Key'] if \"drift-serverdaemon-\" in filename: lst = filename.split(\"-\")[-1].split(\".\")", "= file_version max_key = filename if not max_key: logger.info(\"No new version found. Bailing", "Bailing out.\") return None log_event(\"upgrade_daemon\", \"Upgrading Serverdaemon from version %s to %s\" %", "filename if not max_key: logger.info(\"No new version found. Bailing out.\") return None log_event(\"upgrade_daemon\",", "> max_version[2]: is_more = True if is_more: max_version = file_version max_key = filename", "is_more = True elif file_version[2] > max_version[2]: is_more = True if is_more: max_version", "= f.read().strip() # increment version each time the script is called t =", "logger.info(\"found version %s, %s\", max_version, max_key) transfer = S3Transfer(client) out_filename = \"c:\\\\temp\\\\drift-serverdaemon.zip\" transfer.download_file(BUCKET_NAME,", "TransferConfig REGION = \"eu-west-1\" BUCKET_NAME = \"directive-tiers.dg-api.com\" UE4_BUILDS_FOLDER = \"ue4-builds\" INSTALL_FOLDER = r\"c:\\drift-serverdaemon\"", "logger.warning(\"Old version invalid\") return t def kill_python_processes(): command = [\"tasklist\"] popen = subprocess.Popen(command,", "logger.error('Could not kill task. Error = %s' % e) def check_download(): client =", "\"eu-west-1\" BUCKET_NAME = \"directive-tiers.dg-api.com\" UE4_BUILDS_FOLDER = \"ue4-builds\" INSTALL_FOLDER = r\"c:\\drift-serverdaemon\" def get_my_version(): t", "%s\", max_version, max_key) transfer = S3Transfer(client) out_filename = \"c:\\\\temp\\\\drift-serverdaemon.zip\" transfer.download_file(BUCKET_NAME, max_key, out_filename) return", "log_event import boto3 from boto3.s3.transfer import S3Transfer, TransferConfig REGION = \"eu-west-1\" BUCKET_NAME =", "= zipfile.ZipFile(filename, 'r') for member in zip_file.namelist(): # copy file (taken from zipfile's", "= None for s3_key in files: filename = s3_key['Key'] if \"drift-serverdaemon-\" in filename:", "= True elif file_version[1] > max_version[1]: is_more = True elif file_version[2] > max_version[2]:", "%s to %s\" % (my_version, max_version), severity=\"WARNING\") logger.info(\"found version %s, %s\", max_version, max_key)", "client.list_objects(Bucket=BUCKET_NAME, Prefix=UE4_BUILDS_FOLDER)['Contents'] max_version = get_my_version() my_version = max_version logger.info(\"My version is %s\", \".\".join(str(p)", "is_more = True if is_more: max_version = file_version max_key = filename if not", "import S3Transfer, TransferConfig REGION = \"eu-west-1\" BUCKET_NAME = \"directive-tiers.dg-api.com\" UE4_BUILDS_FOLDER = \"ue4-builds\" INSTALL_FOLDER", "from serverdaemon.logsetup import setup_logging, logger, log_event import boto3 from boto3.s3.transfer import S3Transfer, TransferConfig", "= int(ll[1]) except: continue if pid == os.getpid(): continue if \"python.exe\" in l:", "(name, pid)) command = [\"taskkill\", \"/PID\", str(pid), \"/f\"] subprocess.check_call(command, shell=True) except Exception as", "sys.exit(0) zip_file = zipfile.ZipFile(filename, 'r') for member in zip_file.namelist(): # copy file (taken", "if not filename: sys.exit(0) zip_file = zipfile.ZipFile(filename, 'r') for member in zip_file.namelist(): #", "file_version = [int(p) for p in 
lst[0:-1]] except ValueError: continue is_more = False", "max_key = None for s3_key in files: filename = s3_key['Key'] if \"drift-serverdaemon-\" in", "S3Transfer, TransferConfig REGION = \"eu-west-1\" BUCKET_NAME = \"directive-tiers.dg-api.com\" UE4_BUILDS_FOLDER = \"ue4-builds\" INSTALL_FOLDER =", "setup_logging, logger, log_event import boto3 from boto3.s3.transfer import S3Transfer, TransferConfig REGION = \"eu-west-1\"", "from version %s to %s\" % (my_version, max_version), severity=\"WARNING\") logger.info(\"found version %s, %s\",", "increment version each time the script is called t = [int(p) for p", "version = f.read().strip() # increment version each time the script is called t", "version %s, %s\", max_version, max_key) transfer = S3Transfer(client) out_filename = \"c:\\\\temp\\\\drift-serverdaemon.zip\" transfer.download_file(BUCKET_NAME, max_key,", "stdout, stderr = popen.communicate() lst = stdout.split(\"\\n\") for l in lst: ll =", "for member in zip_file.namelist(): # copy file (taken from zipfile's extract) filename =", "target: shutil.copyfileobj(source, target) zip_file.close() kill_python_processes() log_event(\"upgrade_daemon_complete\", \"Done Upgrading Serverdaemon. All python processes have", "%s...\" % (name, pid)) command = [\"taskkill\", \"/PID\", str(pid), \"/f\"] subprocess.check_call(command, shell=True) except", "file_version[1] > max_version[1]: is_more = True elif file_version[2] > max_version[2]: is_more = True", "file_version[2] > max_version[2]: is_more = True if is_more: max_version = file_version max_key =", "str(pid), \"/f\"] subprocess.check_call(command, shell=True) except Exception as e: logger.error('Could not kill task. Error", "task. Error = %s' % e) def check_download(): client = boto3.client('s3', REGION) files", "\"c:\\\\temp\\\\drift-serverdaemon.zip\" transfer.download_file(BUCKET_NAME, max_key, out_filename) return out_filename if __name__ == \"__main__\": setup_logging(\"updatedaemon\") filename =", "in l: try: logger.info(\"Killing task '%s' with pid %s...\" % (name, pid)) command", "\"ue4-builds\" INSTALL_FOLDER = r\"c:\\drift-serverdaemon\" def get_my_version(): t = [0, 0, 0] try: with", "except Exception as e: logger.error('Could not kill task. Error = %s' % e)", "not len(ll): continue name = ll[0] try: pid = int(ll[1]) except: continue if", "for p in lst[0:-1]] except ValueError: continue is_more = False if file_version[0] >", "popen.communicate() lst = stdout.split(\"\\n\") for l in lst: ll = l.split() if not", "e) def check_download(): client = boto3.client('s3', REGION) files = client.list_objects(Bucket=BUCKET_NAME, Prefix=UE4_BUILDS_FOLDER)['Contents'] max_version =", "source, target: shutil.copyfileobj(source, target) zip_file.close() kill_python_processes() log_event(\"upgrade_daemon_complete\", \"Done Upgrading Serverdaemon. 
All python processes", "each time the script is called t = [int(p) for p in version.split(\".\")]", "%s, %s\", max_version, max_key) transfer = S3Transfer(client) out_filename = \"c:\\\\temp\\\\drift-serverdaemon.zip\" transfer.download_file(BUCKET_NAME, max_key, out_filename)", "= r\"c:\\drift-serverdaemon\" def get_my_version(): t = [0, 0, 0] try: with open(\"VERSION\") as", "= S3Transfer(client) out_filename = \"c:\\\\temp\\\\drift-serverdaemon.zip\" transfer.download_file(BUCKET_NAME, max_key, out_filename) return out_filename if __name__ ==", "t = [0, 0, 0] try: with open(\"VERSION\") as f: version = f.read().strip()", "zipfile.ZipFile(filename, 'r') for member in zip_file.namelist(): # copy file (taken from zipfile's extract)", "zip_file.namelist(): # copy file (taken from zipfile's extract) filename = \"/\".join(member.split(\"/\")[1:]) source =", "file (taken from zipfile's extract) filename = \"/\".join(member.split(\"/\")[1:]) source = zip_file.open(member) out_filename =", "as f: version = f.read().strip() # increment version each time the script is", "called t = [int(p) for p in version.split(\".\")] except: logger.warning(\"Old version invalid\") return", "= filename if not max_key: logger.info(\"No new version found. Bailing out.\") return None", "with open(\"VERSION\") as f: version = f.read().strip() # increment version each time the", "= True if is_more: max_version = file_version max_key = filename if not max_key:", "continue if pid == os.getpid(): continue if \"python.exe\" in l: try: logger.info(\"Killing task", "shutil import zipfile, subprocess from serverdaemon.logsetup import setup_logging, logger, log_event import boto3 from", "file(out_filename, \"wb\") with source, target: shutil.copyfileobj(source, target) zip_file.close() kill_python_processes() log_event(\"upgrade_daemon_complete\", \"Done Upgrading Serverdaemon.", "> max_version[0]: is_more = True elif file_version[1] > max_version[1]: is_more = True elif", "except ValueError: continue is_more = False if file_version[0] > max_version[0]: is_more = True", "pid == os.getpid(): continue if \"python.exe\" in l: try: logger.info(\"Killing task '%s' with", "= stdout.split(\"\\n\") for l in lst: ll = l.split() if not len(ll): continue", "__name__ == \"__main__\": setup_logging(\"updatedaemon\") filename = check_download() if not filename: sys.exit(0) zip_file =", "'%s' with pid %s...\" % (name, pid)) command = [\"taskkill\", \"/PID\", str(pid), \"/f\"]", "s3_key['Key'] if \"drift-serverdaemon-\" in filename: lst = filename.split(\"-\")[-1].split(\".\") try: file_version = [int(p) for", "INSTALL_FOLDER = r\"c:\\drift-serverdaemon\" def get_my_version(): t = [0, 0, 0] try: with open(\"VERSION\")", "import zipfile, subprocess from serverdaemon.logsetup import setup_logging, logger, log_event import boto3 from boto3.s3.transfer", "except: continue if pid == os.getpid(): continue if \"python.exe\" in l: try: logger.info(\"Killing", "max_key = filename if not max_key: logger.info(\"No new version found. 
Bailing out.\") return", "try: with open(\"VERSION\") as f: version = f.read().strip() # increment version each time", "lst[0:-1]] except ValueError: continue is_more = False if file_version[0] > max_version[0]: is_more =", "except: pass target = file(out_filename, \"wb\") with source, target: shutil.copyfileobj(source, target) zip_file.close() kill_python_processes()", "= [0, 0, 0] try: with open(\"VERSION\") as f: version = f.read().strip() #", "% (name, pid)) command = [\"taskkill\", \"/PID\", str(pid), \"/f\"] subprocess.check_call(command, shell=True) except Exception", "os, sys, shutil import zipfile, subprocess from serverdaemon.logsetup import setup_logging, logger, log_event import", "max_version = file_version max_key = filename if not max_key: logger.info(\"No new version found.", "in max_version)) max_key = None for s3_key in files: filename = s3_key['Key'] if", "in version.split(\".\")] except: logger.warning(\"Old version invalid\") return t def kill_python_processes(): command = [\"tasklist\"]", "# copy file (taken from zipfile's extract) filename = \"/\".join(member.split(\"/\")[1:]) source = zip_file.open(member)", "open(\"VERSION\") as f: version = f.read().strip() # increment version each time the script", "client = boto3.client('s3', REGION) files = client.list_objects(Bucket=BUCKET_NAME, Prefix=UE4_BUILDS_FOLDER)['Contents'] max_version = get_my_version() my_version =", "= False if file_version[0] > max_version[0]: is_more = True elif file_version[1] > max_version[1]:", "l in lst: ll = l.split() if not len(ll): continue name = ll[0]", "zipfile, subprocess from serverdaemon.logsetup import setup_logging, logger, log_event import boto3 from boto3.s3.transfer import", "copy file (taken from zipfile's extract) filename = \"/\".join(member.split(\"/\")[1:]) source = zip_file.open(member) out_filename", "out.\") return None log_event(\"upgrade_daemon\", \"Upgrading Serverdaemon from version %s to %s\" % (my_version,", "[int(p) for p in version.split(\".\")] except: logger.warning(\"Old version invalid\") return t def kill_python_processes():", "files: filename = s3_key['Key'] if \"drift-serverdaemon-\" in filename: lst = filename.split(\"-\")[-1].split(\".\") try: file_version", "try: pid = int(ll[1]) except: continue if pid == os.getpid(): continue if \"python.exe\"", "from boto3.s3.transfer import S3Transfer, TransferConfig REGION = \"eu-west-1\" BUCKET_NAME = \"directive-tiers.dg-api.com\" UE4_BUILDS_FOLDER =", "p in max_version)) max_key = None for s3_key in files: filename = s3_key['Key']", "BUCKET_NAME = \"directive-tiers.dg-api.com\" UE4_BUILDS_FOLDER = \"ue4-builds\" INSTALL_FOLDER = r\"c:\\drift-serverdaemon\" def get_my_version(): t =", "kill_python_processes(): command = [\"tasklist\"] popen = subprocess.Popen(command, stdout=subprocess.PIPE) stdout, stderr = popen.communicate() lst", "setup_logging(\"updatedaemon\") filename = check_download() if not filename: sys.exit(0) zip_file = zipfile.ZipFile(filename, 'r') for", "Prefix=UE4_BUILDS_FOLDER)['Contents'] max_version = get_my_version() my_version = max_version logger.info(\"My version is %s\", \".\".join(str(p) for", "lst = filename.split(\"-\")[-1].split(\".\") try: file_version = [int(p) for p in lst[0:-1]] except ValueError:", "logger.info(\"No new version found. 
Bailing out.\") return None log_event(\"upgrade_daemon\", \"Upgrading Serverdaemon from version", "= \"/\".join(member.split(\"/\")[1:]) source = zip_file.open(member) out_filename = os.path.join(INSTALL_FOLDER, filename) try: out_dirname = os.path.dirname(out_filename)", "continue if \"python.exe\" in l: try: logger.info(\"Killing task '%s' with pid %s...\" %", "for l in lst: ll = l.split() if not len(ll): continue name =", "file_version[0] > max_version[0]: is_more = True elif file_version[1] > max_version[1]: is_more = True", "lst = stdout.split(\"\\n\") for l in lst: ll = l.split() if not len(ll):", "# increment version each time the script is called t = [int(p) for" ]
[ "EmailStr = None # search patterns salary_max: Optional[int] = 0 salary_min: Optional[int] =", "from typing import Optional from pydantic import BaseModel, EmailStr class EmailSub(BaseModel): email: EmailStr", "import Optional from pydantic import BaseModel, EmailStr class EmailSub(BaseModel): email: EmailStr = None", "email: EmailStr = None # search patterns salary_max: Optional[int] = 0 salary_min: Optional[int]", "EmailSub(BaseModel): email: EmailStr = None # search patterns salary_max: Optional[int] = 0 salary_min:", "= None # search patterns salary_max: Optional[int] = 0 salary_min: Optional[int] = 0", "search patterns salary_max: Optional[int] = 0 salary_min: Optional[int] = 0 class Config: orm_mode", "None # search patterns salary_max: Optional[int] = 0 salary_min: Optional[int] = 0 class", "# search patterns salary_max: Optional[int] = 0 salary_min: Optional[int] = 0 class Config:", "BaseModel, EmailStr class EmailSub(BaseModel): email: EmailStr = None # search patterns salary_max: Optional[int]", "pydantic import BaseModel, EmailStr class EmailSub(BaseModel): email: EmailStr = None # search patterns", "salary_max: Optional[int] = 0 salary_min: Optional[int] = 0 class Config: orm_mode = True", "Optional from pydantic import BaseModel, EmailStr class EmailSub(BaseModel): email: EmailStr = None #", "class EmailSub(BaseModel): email: EmailStr = None # search patterns salary_max: Optional[int] = 0", "from pydantic import BaseModel, EmailStr class EmailSub(BaseModel): email: EmailStr = None # search", "import BaseModel, EmailStr class EmailSub(BaseModel): email: EmailStr = None # search patterns salary_max:", "EmailStr class EmailSub(BaseModel): email: EmailStr = None # search patterns salary_max: Optional[int] =", "patterns salary_max: Optional[int] = 0 salary_min: Optional[int] = 0 class Config: orm_mode =", "typing import Optional from pydantic import BaseModel, EmailStr class EmailSub(BaseModel): email: EmailStr =" ]
[ "for g in geneset if g in assoc): for x in assoc[gene]: if", "= [g for g in geneset if g in assoc] for gene in", "group \"\"\" term2itemids = defaultdict(set) genes = [g for g in geneset if", "/n is different between the study group and the population \"\"\" if min_ratio", "-*- __copyright__ = \"Copyright (C) 2010-2016, <NAME> al., All rights reserved.\" __author__ =", "return True s = float(study_go) / study_n p = float(pop_go) / pop_n if", "p > min_ratio return p / s > min_ratio # Copyright (C) 2010-2016,", "assoc[gene]: if x in obo_dag: term_cnt[obo_dag[x].id] += 1 return term_cnt def get_terms(desc, geneset,", "<reponame>ezequieljsosa/goatools #!/usr/bin/env python # -*- coding: UTF-8 -*- __copyright__ = \"Copyright (C) 2010-2016,", "if g in assoc): for x in assoc[gene]: if x in obo_dag: term_cnt[obo_dag[x].id]", "assoc, obo_dag, log): \"\"\"Get the terms in the study group \"\"\" term2itemids =", "of {M:>6,} {DESC} items found in association\\n\".format( DESC=desc, N=len(genes), M=len(geneset))) return term2itemids def", "return term2itemids def is_ratio_different(min_ratio, study_go, study_n, pop_go, pop_n): \"\"\" check if the ratio", "gene in genes: for x in assoc[gene]: if x in obo_dag: term2itemids[obo_dag[x].id].add(gene) log.write(\"{N:>6,}", "pop_n if s > p: return s / p > min_ratio return p", "the terms in the study group \"\"\" term2itemids = defaultdict(set) genes = [g", "study_n p = float(pop_go) / pop_n if s > p: return s /", "assoc): for x in assoc[gene]: if x in obo_dag: term_cnt[obo_dag[x].id] += 1 return", "geneset if g in assoc] for gene in genes: for x in assoc[gene]:", "s / p > min_ratio return p / s > min_ratio # Copyright", "__author__ = \"various\" from collections import defaultdict, Counter def count_terms(geneset, assoc, obo_dag): \"\"\"count", "al., All rights reserved.\" __author__ = \"various\" from collections import defaultdict, Counter def", "def count_terms(geneset, assoc, obo_dag): \"\"\"count the number of terms in the study group", "the population \"\"\" if min_ratio is None: return True s = float(study_go) /", "\"\"\" check if the ratio go /n is different between the study group", "min_ratio return p / s > min_ratio # Copyright (C) 2010-2016, <NAME> al.,", "-*- coding: UTF-8 -*- __copyright__ = \"Copyright (C) 2010-2016, <NAME> al., All rights", "terms in the study group \"\"\" term2itemids = defaultdict(set) genes = [g for", "\"various\" from collections import defaultdict, Counter def count_terms(geneset, assoc, obo_dag): \"\"\"count the number", "x in obo_dag: term_cnt[obo_dag[x].id] += 1 return term_cnt def get_terms(desc, geneset, assoc, obo_dag,", "= float(pop_go) / pop_n if s > p: return s / p >", "between the study group and the population \"\"\" if min_ratio is None: return", "get_terms(desc, geneset, assoc, obo_dag, log): \"\"\"Get the terms in the study group \"\"\"", "= \"various\" from collections import defaultdict, Counter def count_terms(geneset, assoc, obo_dag): \"\"\"count the", "found in association\\n\".format( DESC=desc, N=len(genes), M=len(geneset))) return term2itemids def is_ratio_different(min_ratio, study_go, study_n, pop_go,", "min_ratio is None: return True s = float(study_go) / study_n p = float(pop_go)", "/ s > min_ratio # Copyright (C) 2010-2016, <NAME> al., All rights reserved.", "\"\"\"count the number of terms in the study group \"\"\" term_cnt = Counter()", "Counter def count_terms(geneset, assoc, obo_dag): \"\"\"count the number of terms in the study", "in obo_dag: term2itemids[obo_dag[x].id].add(gene) 
log.write(\"{N:>6,} out of {M:>6,} {DESC} items found in association\\n\".format( DESC=desc,", "in the study group \"\"\" term2itemids = defaultdict(set) genes = [g for g", "term_cnt = Counter() for gene in (g for g in geneset if g", "for x in assoc[gene]: if x in obo_dag: term2itemids[obo_dag[x].id].add(gene) log.write(\"{N:>6,} out of {M:>6,}", "return s / p > min_ratio return p / s > min_ratio #", "# -*- coding: UTF-8 -*- __copyright__ = \"Copyright (C) 2010-2016, <NAME> al., All", "for x in assoc[gene]: if x in obo_dag: term_cnt[obo_dag[x].id] += 1 return term_cnt", "term_cnt def get_terms(desc, geneset, assoc, obo_dag, log): \"\"\"Get the terms in the study", "return p / s > min_ratio # Copyright (C) 2010-2016, <NAME> al., All", "s = float(study_go) / study_n p = float(pop_go) / pop_n if s >", "obo_dag, log): \"\"\"Get the terms in the study group \"\"\" term2itemids = defaultdict(set)", "> p: return s / p > min_ratio return p / s >", "s > p: return s / p > min_ratio return p / s", "\"\"\" term_cnt = Counter() for gene in (g for g in geneset if", "defaultdict(set) genes = [g for g in geneset if g in assoc] for", "float(study_go) / study_n p = float(pop_go) / pop_n if s > p: return", "x in obo_dag: term2itemids[obo_dag[x].id].add(gene) log.write(\"{N:>6,} out of {M:>6,} {DESC} items found in association\\n\".format(", "obo_dag: term_cnt[obo_dag[x].id] += 1 return term_cnt def get_terms(desc, geneset, assoc, obo_dag, log): \"\"\"Get", "(C) 2010-2016, <NAME> al., All rights reserved.\" __author__ = \"various\" from collections import", "geneset, assoc, obo_dag, log): \"\"\"Get the terms in the study group \"\"\" term2itemids", "in geneset if g in assoc] for gene in genes: for x in", "the number of terms in the study group \"\"\" term_cnt = Counter() for", "in assoc[gene]: if x in obo_dag: term_cnt[obo_dag[x].id] += 1 return term_cnt def get_terms(desc,", "log): \"\"\"Get the terms in the study group \"\"\" term2itemids = defaultdict(set) genes", "from collections import defaultdict, Counter def count_terms(geneset, assoc, obo_dag): \"\"\"count the number of", "the study group \"\"\" term2itemids = defaultdict(set) genes = [g for g in", "= \"Copyright (C) 2010-2016, <NAME> al., All rights reserved.\" __author__ = \"various\" from", "coding: UTF-8 -*- __copyright__ = \"Copyright (C) 2010-2016, <NAME> al., All rights reserved.\"", "return term_cnt def get_terms(desc, geneset, assoc, obo_dag, log): \"\"\"Get the terms in the", "UTF-8 -*- __copyright__ = \"Copyright (C) 2010-2016, <NAME> al., All rights reserved.\" __author__", "float(pop_go) / pop_n if s > p: return s / p > min_ratio", "association\\n\".format( DESC=desc, N=len(genes), M=len(geneset))) return term2itemids def is_ratio_different(min_ratio, study_go, study_n, pop_go, pop_n): \"\"\"", "is different between the study group and the population \"\"\" if min_ratio is", "in assoc): for x in assoc[gene]: if x in obo_dag: term_cnt[obo_dag[x].id] += 1", "for gene in genes: for x in assoc[gene]: if x in obo_dag: term2itemids[obo_dag[x].id].add(gene)", "if s > p: return s / p > min_ratio return p /", "reserved.\" __author__ = \"various\" from collections import defaultdict, Counter def count_terms(geneset, assoc, obo_dag):", "Counter() for gene in (g for g in geneset if g in assoc):", "pop_go, pop_n): \"\"\" check if the ratio go /n is different between the", "[g for g in geneset if g in assoc] for gene in genes:", "g in assoc] for gene in genes: for x in assoc[gene]: if x", "study group and the population \"\"\" if min_ratio is None: 
return True s", "{DESC} items found in association\\n\".format( DESC=desc, N=len(genes), M=len(geneset))) return term2itemids def is_ratio_different(min_ratio, study_go,", "study group \"\"\" term_cnt = Counter() for gene in (g for g in", "obo_dag): \"\"\"count the number of terms in the study group \"\"\" term_cnt =", "p / s > min_ratio # Copyright (C) 2010-2016, <NAME> al., All rights", "#!/usr/bin/env python # -*- coding: UTF-8 -*- __copyright__ = \"Copyright (C) 2010-2016, <NAME>", "rights reserved.\" __author__ = \"various\" from collections import defaultdict, Counter def count_terms(geneset, assoc,", "go /n is different between the study group and the population \"\"\" if", "= float(study_go) / study_n p = float(pop_go) / pop_n if s > p:", "of terms in the study group \"\"\" term_cnt = Counter() for gene in", "assoc, obo_dag): \"\"\"count the number of terms in the study group \"\"\" term_cnt", "collections import defaultdict, Counter def count_terms(geneset, assoc, obo_dag): \"\"\"count the number of terms", "in geneset if g in assoc): for x in assoc[gene]: if x in", "in obo_dag: term_cnt[obo_dag[x].id] += 1 return term_cnt def get_terms(desc, geneset, assoc, obo_dag, log):", "\"\"\"Get the terms in the study group \"\"\" term2itemids = defaultdict(set) genes =", "the study group and the population \"\"\" if min_ratio is None: return True", "g in geneset if g in assoc] for gene in genes: for x", "> min_ratio return p / s > min_ratio # Copyright (C) 2010-2016, <NAME>", "term2itemids[obo_dag[x].id].add(gene) log.write(\"{N:>6,} out of {M:>6,} {DESC} items found in association\\n\".format( DESC=desc, N=len(genes), M=len(geneset)))", "All rights reserved.\" __author__ = \"various\" from collections import defaultdict, Counter def count_terms(geneset,", "study group \"\"\" term2itemids = defaultdict(set) genes = [g for g in geneset", "ratio go /n is different between the study group and the population \"\"\"", "p: return s / p > min_ratio return p / s > min_ratio", "gene in (g for g in geneset if g in assoc): for x", "check if the ratio go /n is different between the study group and", "if g in assoc] for gene in genes: for x in assoc[gene]: if", "term2itemids = defaultdict(set) genes = [g for g in geneset if g in", "N=len(genes), M=len(geneset))) return term2itemids def is_ratio_different(min_ratio, study_go, study_n, pop_go, pop_n): \"\"\" check if", "for g in geneset if g in assoc] for gene in genes: for", "\"\"\" term2itemids = defaultdict(set) genes = [g for g in geneset if g", "in assoc[gene]: if x in obo_dag: term2itemids[obo_dag[x].id].add(gene) log.write(\"{N:>6,} out of {M:>6,} {DESC} items", "genes = [g for g in geneset if g in assoc] for gene", "defaultdict, Counter def count_terms(geneset, assoc, obo_dag): \"\"\"count the number of terms in the", "/ study_n p = float(pop_go) / pop_n if s > p: return s", "(g for g in geneset if g in assoc): for x in assoc[gene]:", "term2itemids def is_ratio_different(min_ratio, study_go, study_n, pop_go, pop_n): \"\"\" check if the ratio go", "None: return True s = float(study_go) / study_n p = float(pop_go) / pop_n", "different between the study group and the population \"\"\" if min_ratio is None:", "for gene in (g for g in geneset if g in assoc): for", "x in assoc[gene]: if x in obo_dag: term2itemids[obo_dag[x].id].add(gene) log.write(\"{N:>6,} out of {M:>6,} {DESC}", "log.write(\"{N:>6,} out of {M:>6,} {DESC} items found in association\\n\".format( DESC=desc, N=len(genes), M=len(geneset))) return", "in genes: for x in assoc[gene]: if x in 
obo_dag: term2itemids[obo_dag[x].id].add(gene) log.write(\"{N:>6,} out", "/ pop_n if s > p: return s / p > min_ratio return", "in (g for g in geneset if g in assoc): for x in", "p = float(pop_go) / pop_n if s > p: return s / p", "population \"\"\" if min_ratio is None: return True s = float(study_go) / study_n", "= Counter() for gene in (g for g in geneset if g in", "def is_ratio_different(min_ratio, study_go, study_n, pop_go, pop_n): \"\"\" check if the ratio go /n", "study_go, study_n, pop_go, pop_n): \"\"\" check if the ratio go /n is different", "out of {M:>6,} {DESC} items found in association\\n\".format( DESC=desc, N=len(genes), M=len(geneset))) return term2itemids", "pop_n): \"\"\" check if the ratio go /n is different between the study", "if min_ratio is None: return True s = float(study_go) / study_n p =", "python # -*- coding: UTF-8 -*- __copyright__ = \"Copyright (C) 2010-2016, <NAME> al.,", "assoc] for gene in genes: for x in assoc[gene]: if x in obo_dag:", "geneset if g in assoc): for x in assoc[gene]: if x in obo_dag:", "items found in association\\n\".format( DESC=desc, N=len(genes), M=len(geneset))) return term2itemids def is_ratio_different(min_ratio, study_go, study_n,", "genes: for x in assoc[gene]: if x in obo_dag: term2itemids[obo_dag[x].id].add(gene) log.write(\"{N:>6,} out of", "if the ratio go /n is different between the study group and the", "study_n, pop_go, pop_n): \"\"\" check if the ratio go /n is different between", "1 return term_cnt def get_terms(desc, geneset, assoc, obo_dag, log): \"\"\"Get the terms in", "DESC=desc, N=len(genes), M=len(geneset))) return term2itemids def is_ratio_different(min_ratio, study_go, study_n, pop_go, pop_n): \"\"\" check", "= defaultdict(set) genes = [g for g in geneset if g in assoc]", "in assoc] for gene in genes: for x in assoc[gene]: if x in", "{M:>6,} {DESC} items found in association\\n\".format( DESC=desc, N=len(genes), M=len(geneset))) return term2itemids def is_ratio_different(min_ratio,", "is None: return True s = float(study_go) / study_n p = float(pop_go) /", "if x in obo_dag: term2itemids[obo_dag[x].id].add(gene) log.write(\"{N:>6,} out of {M:>6,} {DESC} items found in", "in the study group \"\"\" term_cnt = Counter() for gene in (g for", "number of terms in the study group \"\"\" term_cnt = Counter() for gene", "if x in obo_dag: term_cnt[obo_dag[x].id] += 1 return term_cnt def get_terms(desc, geneset, assoc,", "import defaultdict, Counter def count_terms(geneset, assoc, obo_dag): \"\"\"count the number of terms in", "/ p > min_ratio return p / s > min_ratio # Copyright (C)", "obo_dag: term2itemids[obo_dag[x].id].add(gene) log.write(\"{N:>6,} out of {M:>6,} {DESC} items found in association\\n\".format( DESC=desc, N=len(genes),", "<NAME> al., All rights reserved.\" __author__ = \"various\" from collections import defaultdict, Counter", "the ratio go /n is different between the study group and the population", "+= 1 return term_cnt def get_terms(desc, geneset, assoc, obo_dag, log): \"\"\"Get the terms", "g in geneset if g in assoc): for x in assoc[gene]: if x", "g in assoc): for x in assoc[gene]: if x in obo_dag: term_cnt[obo_dag[x].id] +=", "assoc[gene]: if x in obo_dag: term2itemids[obo_dag[x].id].add(gene) log.write(\"{N:>6,} out of {M:>6,} {DESC} items found", "in association\\n\".format( DESC=desc, N=len(genes), M=len(geneset))) return term2itemids def is_ratio_different(min_ratio, study_go, study_n, pop_go, pop_n):", "2010-2016, <NAME> al., All rights reserved.\" __author__ = \"various\" from collections import 
defaultdict,", "the study group \"\"\" term_cnt = Counter() for gene in (g for g", "is_ratio_different(min_ratio, study_go, study_n, pop_go, pop_n): \"\"\" check if the ratio go /n is", "count_terms(geneset, assoc, obo_dag): \"\"\"count the number of terms in the study group \"\"\"", "M=len(geneset))) return term2itemids def is_ratio_different(min_ratio, study_go, study_n, pop_go, pop_n): \"\"\" check if the", "\"\"\" if min_ratio is None: return True s = float(study_go) / study_n p", "def get_terms(desc, geneset, assoc, obo_dag, log): \"\"\"Get the terms in the study group", "group and the population \"\"\" if min_ratio is None: return True s =", "and the population \"\"\" if min_ratio is None: return True s = float(study_go)", "\"Copyright (C) 2010-2016, <NAME> al., All rights reserved.\" __author__ = \"various\" from collections", "True s = float(study_go) / study_n p = float(pop_go) / pop_n if s", "group \"\"\" term_cnt = Counter() for gene in (g for g in geneset", "__copyright__ = \"Copyright (C) 2010-2016, <NAME> al., All rights reserved.\" __author__ = \"various\"", "term_cnt[obo_dag[x].id] += 1 return term_cnt def get_terms(desc, geneset, assoc, obo_dag, log): \"\"\"Get the", "terms in the study group \"\"\" term_cnt = Counter() for gene in (g", "x in assoc[gene]: if x in obo_dag: term_cnt[obo_dag[x].id] += 1 return term_cnt def" ]
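# A quick sanity check of the helpers above, as a minimal sketch: the GO terms,
# genes, and associations below are invented for illustration, and GOTerm is a
# hand-made stand-in for the objects a real goatools GODag would provide (all
# these helpers need from obo_dag values is an 'id' attribute).
import sys
from collections import namedtuple

GOTerm = namedtuple("GOTerm", "id")
obo_dag = {"GO:0008150": GOTerm("GO:0008150"), "GO:0003674": GOTerm("GO:0003674")}
assoc = {"geneA": {"GO:0008150"}, "geneB": {"GO:0008150", "GO:0003674"}}

print(count_terms({"geneA", "geneB", "geneC"}, assoc, obo_dag))
# Counter({'GO:0008150': 2, 'GO:0003674': 1}); geneC has no association and is skipped

terms = get_terms("study", {"geneA", "geneB"}, assoc, obo_dag, log=sys.stdout)
# writes: "     2 out of      2 study items found in association"

print(is_ratio_different(1.5, study_go=4, study_n=10, pop_go=20, pop_n=200))
# True: the study ratio 0.4 exceeds the population ratio 0.1 by more than 1.5x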
from baselines.common.cmd_util import make_mujoco_env
from baselines.common import tf_util as U
from baselines import logger
from baselines.ppo1 import pposgd_simple
from cartpole.cartpole_sim import cartpole_policy


def train(env_id, num_timesteps, seed=0):
    U.make_session(num_cpu=1).__enter__()

    def policy_fn(name, ob_space, ac_space):
        return cartpole_policy.CartPolePolicy(name=name, ob_space=ob_space, ac_space=ac_space,
                                              hid_size=6, num_hid_layers=2)

    env = make_mujoco_env(env_id, seed)
    pi = pposgd_simple.learn(env, policy_fn,
                             max_timesteps=num_timesteps,
                             timesteps_per_actorbatch=2048,
                             clip_param=0.2, entcoeff=0.0,
                             optim_epochs=10, optim_stepsize=3e-4, optim_batchsize=64,
                             gamma=0.99, lam=0.95, schedule='linear',
                             )
    env.close()
    return pi


if __name__ == '__main__':
    logger.configure(dir="./tensorboard_test", format_strs=["tensorboard"])
    pi = train('InvertedPendulum-v2', num_timesteps=5000, seed=0)
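# Minimal evaluation sketch for the policy returned above. Assumptions (not shown
# in the original script): the object returned by pposgd_simple.learn exposes
# act(stochastic, ob) in the style of baselines' ppo1 MlpPolicy, and the
# environment follows the classic gym (ob, reward, done, info) step API of that
# era. The evaluate() helper and episode count are made up for illustration.
import gym

def evaluate(pi, env_id='InvertedPendulum-v2', episodes=5):
    env = gym.make(env_id)
    returns = []
    for _ in range(episodes):
        ob = env.reset()
        done, total = False, 0.0
        while not done:
            ac, _vpred = pi.act(False, ob)        # stochastic=False -> deterministic action
            ob, reward, done, _info = env.step(ac)
            total += reward
        returns.append(total)
    env.close()
    return returns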
from collections import OrderedDict


class AutoFormSettings(object):
    def __init__(self):
        if not hasattr(self, "spec"):
            raise RuntimeError("%s instance has no 'spec' attribute"
                               % self.__class__.__name__)

        for attrname in self.spec.keys():
            setattr(self, attrname, None)


class WallSettings(AutoFormSettings):
    spec = OrderedDict([
        ("north", {"type": "bool", "tooltip": "Enable/disable wall to the north"}),
        ("south", {"type": "bool", "tooltip": "Enable/disable wall to the south"}),
        ("east", {"type": "bool", "tooltip": "Enable/disable wall to the east"}),
        ("west", {"type": "bool", "tooltip": "Enable/disable wall to the west"})
    ])


class DoorSettings(AutoFormSettings):
    spec = OrderedDict([
        ("direction", {"type": "choice", "choices": ["north", "south", "east", "west"],
            "tooltip": "Set the direction to this door from currently"
            " selected tile"}),
        ("prefix", {"type": "str", "tooltip": "Set the word that should precede "
            "the name of this door, usually 'a' or 'an' (e.g. 'a' "
            "wooden door, 'an' oak door)"}),
        ("name", {"type": "str", "tooltip": "name of this door, e.g. "
            "'wooden door' or 'oak door'"}),
        ("tile_id", {"type": "str", "label": "tile ID", "tooltip": "unique "
            "identifier for programmatic access to this door"})
    ])


class KeypadDoorSettings(AutoFormSettings):
    spec = OrderedDict([
        ("direction", {"type": "choice", "choices": ["north", "south", "east", "west"],
            "tooltip": "Set the direction to this door from currently"
            " selected tile"}),
        ("prefix", {"type": "str", "tooltip": "Set the word that should precede "
            "the name of this door, usually 'a' or 'an' (e.g. 'a' "
            "wooden door, 'an' oak door)"}),
        ("name", {"type": "str", "tooltip": "name of this door, e.g. "
            "'wooden door' or 'oak door'"}),
        ("tile_id", {"type": "str", "label": "tile ID", "tooltip": "unique "
            "identifier for programmatic access to this door"}),
        ("code", {"type": "int", "label": "keypad code", "tooltip": "Integer "
            "code required to unlock this door"}),
        ("prompt", {"type": "str", "label": "keypad prompt", "tooltip": "String "
            "used to prompt player for keypad code entry"})
    ])


class TileSettings(AutoFormSettings):
    spec = OrderedDict([
        ('tile_id', {'type': 'str', 'label': 'tile ID', "tooltip": "Unique "
            "identifier for programmatic access to this tile"}),
        ('name', {'type': 'str', 'tooltip': "Short string used to describe this "
            "tile to the player from afar, e.g. 'a scary room'"}),
        ('description', {'type': 'long_str', 'tooltip': "String used to describe "
            "the tile to player when they enter it. Note that this "
            "string will always be prefixed with 'You are' during "
            "gameplay"}),
        ('dark', {'type': 'bool', 'tooltip': "If enabled, player will need a "
            "light source to see anything on this tile"}),
        ('first_visit_message', {'type': 'long_str', 'label': 'first visit message',
            'tooltip': "String displayed only when player "
            "enters this tile for the first time"}),
        ('first_visit_message_in_dark', {'type': 'bool',
            'label': 'show first visit message if dark',
            'tooltip': "Enable/disable showing the "
            "first visit message if the current tile "
            "is dark"}),
        ('smell_description', {'type': 'str', 'label': 'smell description',
            'tooltip': "String displayed when player smells "
            "the air on the current tile"}),
        ('ground_smell_description', {'type': 'str', 'label': 'ground smell description',
            'tooltip': "String displayed when player "
            "smells the ground on the current tile"}),
        ('ground_taste_description', {'type': 'str', 'label': 'ground taste description',
            'tooltip': "String displayed when player "
            "tastes the ground on the current tile"}),
        ('name_from_north', {'type': 'str', 'label': 'name from north',
            'tooltip': 'String used to describe this tile when'
            ' player is on the adjacent tile to the north'}),
        ('name_from_south', {'type': 'str', 'label': 'name from south',
            'tooltip': 'String used to describe this tile when'
            ' player is on the adjacent tile to the south'}),
        ('name_from_east', {'type': 'str', 'label': 'name from east',
            'tooltip': 'String used to describe this tile when'
            ' player is on the adjacent tile to the east'}),
        ('name_from_west', {'type': 'str', 'label': 'name from west',
            'tooltip': 'String used to describe this tile when'
            ' player is on the adjacent tile to the west'})
    ])
'a' \" \"wooden door, 'an'", "'str', 'label': 'tile ID', \"tooltip\": \"Unique \" \"identifier for programmatic access to this", "this tile\"}), ('first_visit_message', {'type': 'long_str', 'label': 'first visit message', 'tooltip': \"String displayed only", "'str', 'label': 'smell description', 'tooltip': \"String displayed when player smells \" \"the air", "(\"direction\", {\"type\": \"choice\", \"choices\": [\"north\", \"south\", \"east\", \"west\"], \"tooltip\": \"Set the direction to", "player smells \" \"the air on the current tile\"}), ('ground_smell_description', {'type': 'str', 'label':", "access to this tile\"}), ('name', {'type': 'str', 'tooltip': \"Short string used to describe", "def __init__(self): if not hasattr(self, \"spec\"): raise RuntimeError(\"%s instance has no 'spec' attribute\"", "tile for the first time\"}), ('first_visit_message_in_dark', {'type': 'bool', 'label': 'show first visit message", "oak door)\"}), (\"name\", {\"type\": \"str\", \"tooltip\": \"name of this door, e.g. \" \"'wooden", "\"Unique \" \"identifier for programmatic access to this tile\"}), ('name', {'type': 'str', 'tooltip':", "\" \"gameplay\"}), ('dark', {'type': 'bool', 'tooltip': \"If enabled, player will need a \"" ]
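The spec-driven pattern above can be exercised directly: AutoFormSettings.__init__ creates one instance attribute (initialised to None) per key of the class-level spec, so a settings object is simply a bag of named fields plus per-field metadata that a form builder can walk. A minimal usage sketch follows; it only uses names defined above, except walk_spec, which is a hypothetical helper added here purely for illustration.

settings = TileSettings()
settings.tile_id = "room_01"
settings.name = "a scary room"
settings.dark = True

def walk_spec(settings_obj):
    # Pair each spec entry's metadata with the value currently stored on the instance.
    for attrname, meta in settings_obj.spec.items():
        label = meta.get("label", attrname)
        print("%s (%s): %r" % (label, meta["type"], getattr(settings_obj, attrname)))

walk_spec(settings)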
# encoding: utf-8

import time
import unittest

from timingsutil import Stopwatch

import logging_helper

logging = logging_helper.setup_logging()


class TestConfiguration(unittest.TestCase):

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_stopwatch(self):
        stopwatch = Stopwatch()

        for _ in range(3):
            time.sleep(1)
            self.assertEqual(round(stopwatch.lap()), 1)

        self.assertEqual(round(stopwatch.stop()), 3)


if __name__ == u'__main__':
    unittest.main()
# -*- coding: utf-8 -*-
"""
apx_data

Maintains the approach procedure (aproximação) dictionary.

revision 0.2  2015/nov  mlabru  pep8 style conventions
revision 0.1  2014/nov  mlabru  initial release (Linux/Python)
"""
# < imports >--------------------------------------------------------------------------------------

# python library
import logging
import sys

# PyQt library
from PyQt5 import QtCore
# FIXME QtXml is no longer supported.
from PyQt5 import QtXml

# model
import model.items.apx_new as model
import model.items.parser_utils as parser

# control
import control.events.events_basic as events

# < class CApxData >-------------------------------------------------------------------------------

class CApxData(dict):
    """
    Maintains the approach procedure dictionary.

    <aproximacao nApx="1">
        <descricao>FINAL H3</descricao>
        <aerodromo>SBSP</aerodromo>
        <pista>17R</pista>
        <ils>N</ils>
        <aproxperd>N</aproxperd>
        <espera>2</espera>
        <breakpoint nBrk="1"> ... </breakpoint>
    </aproximacao>
    """
    # ---------------------------------------------------------------------------------------------
    def __init__(self, f_model, f_data=None):
        """
        @param f_model: model manager
        @param f_data: approach procedure data
        """
        # check input
        assert f_model

        # initialise the super class
        super(CApxData, self).__init__()

        # save the model manager
        self._model = f_model
        # save the event manager
        self._event = f_model.event

        # received data ?
        if f_data is not None:
            # received a list ?
            if isinstance(f_data, list):
                # create an approach procedure from the list data
                pass  # self.make_apx(f_data)

            # received an approach procedure ?
            elif isinstance(f_data, CApxData):
                # copy the approach procedure
                pass  # self.copy_apx(f_data)

            # otherwise, received the pathname of an approach procedure file
            else:
                # load the approach procedure dictionary from a file on disk
                self.load_file(f_data)

    # ---------------------------------------------------------------------------------------------
    def load_file(self, fs_apx_pn):
        """
        Loads the approach procedure data from a file on disk.

        @param fs_apx_pn: pathname of the file on disk
        """
        # check input
        assert fs_apx_pn

        # load the approach procedure file
        self.parse_apx_xml(fs_apx_pn + ".xml")

    # ---------------------------------------------------------------------------------------------
    def make_apx(self, fdct_root, fdct_data):
        """
        Loads the approach procedure data from a dictionary.

        @param fdct_data: list of approach procedure data

        @return flag and message
        """
        # check input
        assert fdct_root is not None
        assert fdct_data is not None

        # is it a newton approach procedure file ?
        if "aproximacoes" != fdct_root["tagName"]:
            # logger
            l_log = logging.getLogger("CApxData::make_apx")
            l_log.setLevel(logging.CRITICAL)
            l_log.critical("<E01: não é um arquivo de procedimentos de aproximação.")

            # create a quit event and dispatch it
            l_evt = events.CQuit()
            assert l_evt
            self._event.post(l_evt)

            # if not, bail out...
            sys.exit(1)

        # is it a newton file ?
        if "NEWTON" != fdct_root["FORMAT"]:
            # logger
            l_log = logging.getLogger("CApxData::make_apx")
            l_log.setLevel(logging.CRITICAL)
            l_log.critical("<E02: não está em um formato aceito.")

            # create a quit event and dispatch it
            l_evt = events.CQuit()
            assert l_evt
            self._event.post(l_evt)

            # if not, bail out...
            sys.exit(1)

        # does it carry the newton signature ?
        if "1961" != fdct_root["CODE"]:
            # logger
            l_log = logging.getLogger("CApxData::make_apx")
            l_log.setLevel(logging.CRITICAL)
            l_log.critical("<E03: não tem a assinatura correta.")

            # create a quit event and dispatch it
            l_evt = events.CQuit()
            assert l_evt
            self._event.post(l_evt)

            # if not, bail out...
            sys.exit(1)

        # check whether an identification exists
        if "nApx" in fdct_data:
            # create the approach procedure
            l_apx = model.CApxNEW(self._model, fdct_data, fdct_root["VERSION"])
            assert l_apx

            # store the approach procedure in the dictionary
            self[fdct_data["nApx"]] = l_apx

        # otherwise, there is no identification
        else:
            # build a message
            ls_msg = "não tem identificação. Aproximação não incluída."

            # logger
            l_log = logging.getLogger("CApxData::make_apx")
            l_log.setLevel(logging.WARNING)
            l_log.warning("<E04: {}".format(ls_msg))

            # bail out with the error message...
            return False, ls_msg

        # return Ok
        return True, None

    # ---------------------------------------------------------------------------------------------
    def parse_apx_xml(self, fs_apx_pn):
        """
        Loads the approach procedures file.

        @param fs_apx_pn: pathname of the file on disk
        """
        # check input
        assert fs_apx_pn

        # create the QFile for the approach procedures XML file
        l_data_file = QtCore.QFile(fs_apx_pn)
        assert l_data_file is not None

        # open the approach procedures XML file
        l_data_file.open(QtCore.QIODevice.ReadOnly)

        # error opening the file ?
        if not l_data_file.isOpen():
            # logger
            l_log = logging.getLogger("CApxData::make_apx")
            l_log.setLevel(logging.CRITICAL)
            l_log.critical("<E01: erro na abertura de {}.".format(fs_apx_pn))

            # create a quit event and dispatch it
            l_evt = events.CQuit()
            assert l_evt
            self._event.post(l_evt)

            # terminate the application
            sys.exit(1)

        # create the approach procedure XML document
        # FIXME QtXml is no longer supported.
        l_xdoc_apx = QtXml.QDomDocument("aproximacoes")
        assert l_xdoc_apx is not None

        # error loading the document ?
        if not l_xdoc_apx.setContent(l_data_file):
            # close the file
            l_data_file.close()

            # logger
            l_log = logging.getLogger("CApxData::make_apx")
            l_log.setLevel(logging.CRITICAL)
            l_log.critical("<E02: falha no parse de {}.".format(fs_apx_pn))

            # create a quit event and dispatch it
            l_evt = events.CQuit()
            assert l_evt
            self._event.post(l_evt)

            # terminate the application
            sys.exit(1)

        # close the file
        l_data_file.close()

        # get the document's root element
        l_elem_root = l_xdoc_apx.documentElement()
        assert l_elem_root is not None

        # parse the root element attributes
        ldct_root = parser.parse_root_element(l_elem_root)

        # build a list with the approach procedure elements
        l_node_list = l_elem_root.elementsByTagName("aproximacao")

        # for every node in the list...
        for li_ndx in range(l_node_list.length()):
            # initialise the data dictionary
            ldct_data = {}

            # initialise the breakpoint list
            ldct_data["breakpoints"] = []

            # get a node from the list
            l_element = l_node_list.at(li_ndx).toElement()
            assert l_element is not None

            # read identification if available
            if l_element.hasAttribute("nApx"):
                ldct_data["nApx"] = int(l_element.attribute("nApx"))

            # get the first node of the sub-tree
            l_node = l_element.firstChild()
            assert l_node is not None

            # walk the sub-tree
            while not l_node.isNull():
                # try to convert the node into an element
                l_element = l_node.toElement()
                assert l_element is not None

                # is the node an element ?
                if not l_element.isNull():
                    # parse the element
                    ldct_tmp = parser.parse_aproximacao(l_element)

                    # does it carry a breakpoint ?
                    if "breakpoint" in ldct_tmp:
                        # update the dictionary with the breakpoint
                        ldct_data["breakpoints"].append(ldct_tmp["breakpoint"])

                        # delete this element
                        del ldct_tmp["breakpoint"]

                    # update the data dictionary
                    ldct_data.update(ldct_tmp)

                # next node
                l_node = l_node.nextSibling()
                assert l_node is not None

            # load the approach procedure data from the dictionary
            self.make_apx(ldct_root, ldct_data)

    # ---------------------------------------------------------------------------------------------
    def save2disk(self, fs_apx_pn=None):
        """
        Saves the approach procedure data to a file on disk.

        @param fs_apx_pn: pathname of the file to save to

        @return flag and message
        """
        # return code
        lv_ok = True

        # message
        ls_msg = "save Ok"

        # return flag and message
        return lv_ok, ls_msg

# < the end >---------------------------------------------------------------------------------------
sys.exit(1) # é", "aplicação sys.exit(1) # cria o documento XML do procedimento de aproximação # FIXME", "cria uma lista com os elementos de procedimento de aproximação l_node_list = l_elem_root.elementsByTagName(\"aproximacao\")", "model import model.items.apx_new as model import model.items.parser_utils as parser # control import control.events.events_basic", "se existe identificação if \"nApx\" in fdct_data: # cria procedimento de aproximação l_apx", "revision 0.2 2015/nov mlabru pep8 style conventions revision 0.1 2014/nov mlabru initial release", "if isinstance(f_data, list): # cria um procedimento de aproximação com os dados da", "de aproximação l_data_file = QtCore.QFile(fs_apx_pn) assert l_data_file is not None # abre o", "fdct_data): \"\"\" carrega os dados de procedimento de aproximação a partir de um", "o elemento raíz do documento l_elem_root = l_xdoc_apx.documentElement() assert l_elem_root is not None", "disco self.load_file(f_data) # --------------------------------------------------------------------------------------------- def load_file(self, fs_apx_pn): \"\"\" carrega os dados do procedimento", "if not l_xdoc_apx.setContent(l_data_file): # fecha o arquivo l_data_file.close() # logger l_log = logging.getLogger(\"CApxData::make_apx\")", "# próximo nó l_node = l_node.nextSibling() assert l_node is not None # carrega", "= l_elem_root.elementsByTagName(\"aproximacao\") # para todos os nós na lista... for li_ndx in range(l_node_list.length()):", "# --------------------------------------------------------------------------------------------- def parse_apx_xml(self, fs_apx_pn): \"\"\" carrega o arquivo de procedimentos de aproximação", "salva os dados da procedimento de aproximação em um arquivo em disco @param", "# cria um procedimento de aproximação com os dados da lista pass #", "\"\"\" apx_data mantém as informações sobre o dicionário de procedimento de aproximação revision", "assert l_xdoc_apx is not None # erro na carga do documento ? if", "procedimento de aproximação revision 0.2 2015/nov mlabru pep8 style conventions revision 0.1 2014/nov", "model import model.items.parser_utils as parser # control import control.events.events_basic as events # <", "event manager self._event = f_model.event # recebeu dados ? if f_data is not", "l_data_file.open(QtCore.QIODevice.ReadOnly) # erro na abertura do arquivo ? 
if not l_data_file.isOpen(): # logger", "assert l_node is not None # carrega os dados de procedimento de aproximação", "carrega o arquivo de procedimento de aproximação self.parse_apx_xml(fs_apx_pn + \".xml\") # --------------------------------------------------------------------------------------------- def", "l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E02: não está em um formato aceito.\") # cria um evento de", "# inicia o dicionário de dados ldct_data = {} # inicia a lista", "e mensagem \"\"\" # check input assert fdct_root is not None assert fdct_data", "l_data_file.isOpen(): # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E01: erro na abertura de {}.\".format(fs_apx_pn))", "dados ldct_data.update(ldct_tmp) # próximo nó l_node = l_node.nextSibling() assert l_node is not None", "import control.events.events_basic as events # < class CApxData >------------------------------------------------------------------------------- class CApxData(dict): \"\"\" mantém", "l_element.isNull(): # faz o parse do elemento ldct_tmp = parser.parse_aproximacao(l_element) # atualiza o", "= {} # inicia a lista de breakpoints ldct_data[\"breakpoints\"] = [] # obtém", "breakpoint if \"breakpoint\" in ldct_tmp: # atualiza o dicionário com o breakpoint ldct_data[\"breakpoints\"].append(ldct_tmp[\"breakpoint\"])", "aproximação a partir de um dicionário @param fdct_data: lista de dados de procedimento", "fs_apx_pn # cria o QFile para o arquivo XML do procedimentos de aproximação", "# coloca a procedimento de aproximação no dicionário self[fdct_data[\"nApx\"]] = l_apx # senão,", "parse_apx_xml(self, fs_apx_pn): \"\"\" carrega o arquivo de procedimentos de aproximação @param fs_apx_pn: pathname", "procedimento de aproximação com os dados da lista pass # self.make_apx(f_data) # recebeu", "# --------------------------------------------------------------------------------------------- def load_file(self, fs_apx_pn): \"\"\" carrega os dados do procedimento de aproximação", "elemento del ldct_tmp[\"breakpoint\"] # atualiza o dicionário de dados ldct_data.update(ldct_tmp) # próximo nó", "não for, cai fora... sys.exit(1) # verifica se existe identificação if \"nApx\" in", "FIXME QtXml is no longer supported. from PyQt5 import QtXml # model import", "# check input assert fdct_root is not None assert fdct_data is not None", "obtém o primeiro nó da sub-árvore l_node = l_element.firstChild() assert l_node is not", "fdct_data: # cria procedimento de aproximação l_apx = model.CApxNEW(self._model, fdct_data, fdct_root[\"VERSION\"]) assert l_apx", "um arquivo em disco @param fs_apx_pn: pathname do arquivo em disco \"\"\" #", "informações sobre o dicionário de procedimento de aproximação revision 0.2 2015/nov mlabru pep8", "class super(CApxData, self).__init__() # salva o model manager self._model = f_model # salva", "lista ? if isinstance(f_data, list): # cria um procedimento de aproximação com os", "dicionário self[fdct_data[\"nApx\"]] = l_apx # senão, não existe identificação else: # monta uma", "abre o arquivo XML do procedimentos de aproximação l_data_file.open(QtCore.QIODevice.ReadOnly) # erro na abertura", "# erro na carga do documento ? 
if not l_xdoc_apx.setContent(l_data_file): # fecha o", "o arquivo XML do procedimentos de aproximação l_data_file = QtCore.QFile(fs_apx_pn) assert l_data_file is", "procedimento de aproximação em um arquivo em disco @param fs_apx_pn: path name do", "\"\"\" # return code lv_ok = True # mensagem ls_msg = \"save Ok\"", "is no longer supported. from PyQt5 import QtXml # model import model.items.apx_new as", "l_data_file.close() # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E02: falha no parse de {}.\".format(fs_apx_pn))", "l_elem_root is not None # faz o parse dos atributos do elemento raíz", "cria um procedimento de aproximação com os dados da lista pass # self.make_apx(f_data)", "= logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E02: falha no parse de {}.\".format(fs_apx_pn)) # cria um evento", "# -*- coding: utf-8 -*- \"\"\" apx_data mantém as informações sobre o dicionário", "existe identificação if \"nApx\" in fdct_data: # cria procedimento de aproximação l_apx =", "not None # erro na carga do documento ? if not l_xdoc_apx.setContent(l_data_file): #", "= \"save Ok\" # retorna flag e mensagem return lv_ok, ls_msg # <", "procedimento de aproximação l_apx = model.CApxNEW(self._model, fdct_data, fdct_root[\"VERSION\"]) assert l_apx # coloca a", "monta uma mensagem ls_msg = \"não tem identificação. Aproximação não incluída.\" # logger", "# fecha o arquivo l_data_file.close() # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E02: falha", "assert fs_apx_pn # carrega o arquivo de procedimento de aproximação self.parse_apx_xml(fs_apx_pn + \".xml\")", "from PyQt5 import QtXml # model import model.items.apx_new as model import model.items.parser_utils as", "atualiza o dicionário com o breakpoint if \"breakpoint\" in ldct_tmp: # atualiza o", "dados de procedimento de aproximação a partir de um dicionário self.make_apx(ldct_root, ldct_data) #", "em disco @param fs_apx_pn: path name do arquivo onde salvar @return flag e", "if \"nApx\" in fdct_data: # cria procedimento de aproximação l_apx = model.CApxNEW(self._model, fdct_data,", "is not None: # recebeu uma lista ? if isinstance(f_data, list): # cria", "ldct_data[\"nApx\"] = int(l_element.attribute(\"nApx\")) # obtém o primeiro nó da sub-árvore l_node = l_element.firstChild()", "if \"NEWTON\" != fdct_root[\"FORMAT\"]: # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E02: não está", "arquivo em disco \"\"\" # check input assert fs_apx_pn # carrega o arquivo", "arquivo em disco @param fs_apx_pn: path name do arquivo onde salvar @return flag", "em um elemento l_element = l_node.toElement() assert l_element is not None # o", "# erro na abertura do arquivo ? if not l_data_file.isOpen(): # logger l_log", "f_data: dados dos procedimentos de aproximação \"\"\" # check input assert f_model #", "um dicionário self.make_apx(ldct_root, ldct_data) # --------------------------------------------------------------------------------------------- def save2disk(self, fs_apx_pn=None): \"\"\" salva os dados", "FIXME QtXml is no longer supported. l_xdoc_apx = QtXml.QDomDocument(\"aproximacoes\") assert l_xdoc_apx is not", "l_node = l_node.nextSibling() assert l_node is not None # carrega os dados de", "# dissemina o evento self._event.post(l_evt) # se não for, cai fora... 
sys.exit(1) #", "arquivo XML do procedimentos de aproximação l_data_file = QtCore.QFile(fs_apx_pn) assert l_data_file is not", "arquivo l_data_file.close() # obtém o elemento raíz do documento l_elem_root = l_xdoc_apx.documentElement() assert", "\"\"\" salva os dados da procedimento de aproximação em um arquivo em disco", "f_data=None): \"\"\" @param f_model: model manager @param f_data: dados dos procedimentos de aproximação", "\"\"\" # --------------------------------------------------------------------------------------------- def __init__(self, f_model, f_data=None): \"\"\" @param f_model: model manager @param", "documento ? if not l_xdoc_apx.setContent(l_data_file): # fecha o arquivo l_data_file.close() # logger l_log", "def parse_apx_xml(self, fs_apx_pn): \"\"\" carrega o arquivo de procedimentos de aproximação @param fs_apx_pn:", "abertura do arquivo ? if not l_data_file.isOpen(): # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL)", "QtXml is no longer supported. l_xdoc_apx = QtXml.QDomDocument(\"aproximacoes\") assert l_xdoc_apx is not None", "None # --------------------------------------------------------------------------------------------- def parse_apx_xml(self, fs_apx_pn): \"\"\" carrega o arquivo de procedimentos de", "fecha o arquivo l_data_file.close() # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E02: falha no", "o nó em um elemento l_element = l_node.toElement() assert l_element is not None", "<aproximacao nApx=\"1\"> <descricao>FINAL H3</descricao> <aerodromo>SBSP</aerodromo> <pista>17R</pista> <ils>N</ils> <aproxperd>N</aproxperd> <espera>2</espera> <breakpoint nBrk=\"1\"> ... </breakpoint>", "{}.\".format(fs_apx_pn)) # cria um evento de quit l_evt = events.CQuit() assert l_evt #", "not None # abre o arquivo XML do procedimentos de aproximação l_data_file.open(QtCore.QIODevice.ReadOnly) #", "XML do procedimentos de aproximação l_data_file = QtCore.QFile(fs_apx_pn) assert l_data_file is not None", "# se não for, cai fora... sys.exit(1) # é um arquivo do newton", "dicionário @param fdct_data: lista de dados de procedimento de aproximação @return flag e", "um arquivo de procedimento de aproximação else: # carrega o dicionário de procedimento", "# recebeu dados ? if f_data is not None: # recebeu uma lista", "aproximação ? elif isinstance(f_data, CApxData): # copia o procedimento de aproximação pass #", "de aproximação.\") # cria um evento de quit l_evt = events.CQuit() assert l_evt", "aproximação l_node_list = l_elem_root.elementsByTagName(\"aproximacao\") # para todos os nós na lista... for li_ndx", "fdct_root, fdct_data): \"\"\" carrega os dados de procedimento de aproximação a partir de", "este elemento del ldct_tmp[\"breakpoint\"] # atualiza o dicionário de dados ldct_data.update(ldct_tmp) # próximo", "do procedimentos de aproximação l_data_file = QtCore.QFile(fs_apx_pn) assert l_data_file is not None #", "lista pass # self.make_apx(f_data) # recebeu um procedimento de aproximação ? 
elif isinstance(f_data,", "cria procedimento de aproximação l_apx = model.CApxNEW(self._model, fdct_data, fdct_root[\"VERSION\"]) assert l_apx # coloca", "elementos de procedimento de aproximação l_node_list = l_elem_root.elementsByTagName(\"aproximacao\") # para todos os nós", "ldct_data[\"breakpoints\"] = [] # obtém um nó da lista l_element = l_node_list.at(li_ndx).toElement() assert", "com o breakpoint if \"breakpoint\" in ldct_tmp: # atualiza o dicionário com o", "\"não tem identificação. Aproximação não incluída.\" # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.WARNING) l_log.warning(\"<E04:", "lista... for li_ndx in range(l_node_list.length()): # inicia o dicionário de dados ldct_data =", "ldct_data) # --------------------------------------------------------------------------------------------- def save2disk(self, fs_apx_pn=None): \"\"\" salva os dados da procedimento de", "de aproximação em um arquivo em disco @param fs_apx_pn: path name do arquivo", "logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E03: não tem a assinatura correta.\") # cria um evento de", "arquivo ? if not l_data_file.isOpen(): # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E01: erro", "not None # o nó é um elemento ? if not l_element.isNull(): #", "load_file(self, fs_apx_pn): \"\"\" carrega os dados do procedimento de aproximação de um arquivo", "com o breakpoint ldct_data[\"breakpoints\"].append(ldct_tmp[\"breakpoint\"]) # apaga este elemento del ldct_tmp[\"breakpoint\"] # atualiza o", "mlabru pep8 style conventions revision 0.1 2014/nov mlabru initial release (Linux/Python) \"\"\" #", "\"\"\" @param f_model: model manager @param f_data: dados dos procedimentos de aproximação \"\"\"", "mensagem ls_msg = \"save Ok\" # retorna flag e mensagem return lv_ok, ls_msg", "input assert f_model # inicia a super class super(CApxData, self).__init__() # salva o", "parse de {}.\".format(fs_apx_pn)) # cria um evento de quit l_evt = events.CQuit() assert", "parse dos atributos do elemento raíz ldct_root = parser.parse_root_element(l_elem_root) # cria uma lista", "assert l_element is not None # o nó é um elemento ? if", "not None # carrega os dados de procedimento de aproximação a partir de", "um formato aceito.\") # cria um evento de quit l_evt = events.CQuit() assert", "lv_ok = True # mensagem ls_msg = \"save Ok\" # retorna flag e", "o model manager self._model = f_model # salva o event manager self._event =", "procedimentos de aproximação \"\"\" # check input assert f_model # inicia a super", "o dicionário de procedimento de aproximação <aproximacao nApx=\"1\"> <descricao>FINAL H3</descricao> <aerodromo>SBSP</aerodromo> <pista>17R</pista> <ils>N</ils>", "próximo nó l_node = l_node.nextSibling() assert l_node is not None # carrega os", "input assert fs_apx_pn # cria o QFile para o arquivo XML do procedimentos", "if l_element.hasAttribute(\"nApx\"): ldct_data[\"nApx\"] = int(l_element.attribute(\"nApx\")) # obtém o primeiro nó da sub-árvore l_node", "ldct_data[\"breakpoints\"].append(ldct_tmp[\"breakpoint\"]) # apaga este elemento del ldct_tmp[\"breakpoint\"] # atualiza o dicionário de dados", "l_xdoc_apx.documentElement() assert l_elem_root is not None # faz o parse dos atributos do", "nó é um elemento ? 
if not l_element.isNull(): # faz o parse do", "fdct_root is not None assert fdct_data is not None # é uma procedimento", "l_node.toElement() assert l_element is not None # o nó é um elemento ?", "@param fdct_data: lista de dados de procedimento de aproximação @return flag e mensagem", "self._event.post(l_evt) # se não for, cai fora... sys.exit(1) # é um arquivo do", "breakpoint ldct_data[\"breakpoints\"].append(ldct_tmp[\"breakpoint\"]) # apaga este elemento del ldct_tmp[\"breakpoint\"] # atualiza o dicionário de", "de aproximação self.parse_apx_xml(fs_apx_pn + \".xml\") # --------------------------------------------------------------------------------------------- def make_apx(self, fdct_root, fdct_data): \"\"\" carrega", "[] # obtém um nó da lista l_element = l_node_list.at(li_ndx).toElement() assert l_element is", "ldct_data = {} # inicia a lista de breakpoints ldct_data[\"breakpoints\"] = [] #", "self[fdct_data[\"nApx\"]] = l_apx # senão, não existe identificação else: # monta uma mensagem", "class CApxData >------------------------------------------------------------------------------- class CApxData(dict): \"\"\" mantém as informações sobre o dicionário de", "no parse de {}.\".format(fs_apx_pn)) # cria um evento de quit l_evt = events.CQuit()", "# atualiza o dicionário com o breakpoint ldct_data[\"breakpoints\"].append(ldct_tmp[\"breakpoint\"]) # apaga este elemento del", "dicionário com o breakpoint if \"breakpoint\" in ldct_tmp: # atualiza o dicionário com", "identification if available if l_element.hasAttribute(\"nApx\"): ldct_data[\"nApx\"] = int(l_element.attribute(\"nApx\")) # obtém o primeiro nó", "abertura de {}.\".format(fs_apx_pn)) # cria um evento de quit l_evt = events.CQuit() assert", "self._event.post(l_evt) # termina a aplicação sys.exit(1) # fecha o arquivo l_data_file.close() # obtém", "de procedimentos de aproximação.\") # cria um evento de quit l_evt = events.CQuit()", "o event manager self._event = f_model.event # recebeu dados ? if f_data is", "para o arquivo XML do procedimentos de aproximação l_data_file = QtCore.QFile(fs_apx_pn) assert l_data_file", "de um arquivo de procedimento de aproximação else: # carrega o dicionário de", "logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.WARNING) l_log.warning(\"<E04: {}\".format(ls_msg)) # se não for, cai fora...", "recebeu uma lista ? if isinstance(f_data, list): # cria um procedimento de aproximação", "disco \"\"\" # check input assert fs_apx_pn # carrega o arquivo de procedimento", "# é uma procedimento de aproximação do newton ? if \"aproximacoes\" != fdct_root[\"tagName\"]:", "procedimentos de aproximação l_data_file = QtCore.QFile(fs_apx_pn) assert l_data_file is not None # abre", "read identification if available if l_element.hasAttribute(\"nApx\"): ldct_data[\"nApx\"] = int(l_element.attribute(\"nApx\")) # obtém o primeiro", "None # read identification if available if l_element.hasAttribute(\"nApx\"): ldct_data[\"nApx\"] = int(l_element.attribute(\"nApx\")) # obtém", "# percorre a sub-árvore while not l_node.isNull(): # tenta converter o nó em", "logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.WARNING) l_log.warning(\"<E04: {}\".format(ls_msg)) # se não for, cai fora... 
return False, ls_msg", "# apaga este elemento del ldct_tmp[\"breakpoint\"] # atualiza o dicionário de dados ldct_data.update(ldct_tmp)", "= parser.parse_root_element(l_elem_root) # cria uma lista com os elementos de procedimento de aproximação", "if \"aproximacoes\" != fdct_root[\"tagName\"]: # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E01: não é", "senão, não existe identificação else: # monta uma mensagem ls_msg = \"não tem", "sys.exit(1) # é um arquivo do newton ? if \"NEWTON\" != fdct_root[\"FORMAT\"]: #", "dissemina o evento self._event.post(l_evt) # se não for, cai fora... sys.exit(1) # verifica", "o parse dos atributos do elemento raíz ldct_root = parser.parse_root_element(l_elem_root) # cria uma", "self.make_apx(ldct_root, ldct_data) # --------------------------------------------------------------------------------------------- def save2disk(self, fs_apx_pn=None): \"\"\" salva os dados da procedimento", "apx_data mantém as informações sobre o dicionário de procedimento de aproximação revision 0.2", "assert l_elem_root is not None # faz o parse dos atributos do elemento", "o dicionário de dados ldct_data = {} # inicia a lista de breakpoints", "a assinatura do newton ? if \"1961\" != fdct_root[\"CODE\"]: # logger l_log =", "procedimento de aproximação l_node_list = l_elem_root.elementsByTagName(\"aproximacao\") # para todos os nós na lista...", "# < class CApxData >------------------------------------------------------------------------------- class CApxData(dict): \"\"\" mantém as informações sobre o", "procedimento de aproximação @return flag e mensagem \"\"\" # check input assert fdct_root", "em um formato aceito.\") # cria um evento de quit l_evt = events.CQuit()", "carrega o arquivo de procedimentos de aproximação @param fs_apx_pn: pathname do arquivo em", "2015/nov mlabru pep8 style conventions revision 0.1 2014/nov mlabru initial release (Linux/Python) \"\"\"", "li_ndx in range(l_node_list.length()): # inicia o dicionário de dados ldct_data = {} #", "# atualiza o dicionário com o breakpoint if \"breakpoint\" in ldct_tmp: # atualiza", "+ \".xml\") # --------------------------------------------------------------------------------------------- def make_apx(self, fdct_root, fdct_data): \"\"\" carrega os dados de", "do newton ? if \"aproximacoes\" != fdct_root[\"tagName\"]: # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL)", "cai fora... sys.exit(1) # é a assinatura do newton ? if \"1961\" !=", "l_node.nextSibling() assert l_node is not None # carrega os dados de procedimento de", "l_node_list = l_elem_root.elementsByTagName(\"aproximacao\") # para todos os nós na lista... for li_ndx in", "= True # mensagem ls_msg = \"save Ok\" # retorna flag e mensagem", "arquivo em disco self.load_file(f_data) # --------------------------------------------------------------------------------------------- def load_file(self, fs_apx_pn): \"\"\" carrega os dados", "o evento self._event.post(l_evt) # se não for, cai fora... 
sys.exit(1) # verifica se", "nó da lista l_element = l_node_list.at(li_ndx).toElement() assert l_element is not None # read", "incluída.\" # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.WARNING) l_log.warning(\"<E04: {}\".format(ls_msg)) # se não for,", "f_model, f_data=None): \"\"\" @param f_model: model manager @param f_data: dados dos procedimentos de", "QFile para o arquivo XML do procedimentos de aproximação l_data_file = QtCore.QFile(fs_apx_pn) assert", "dicionário com o breakpoint ldct_data[\"breakpoints\"].append(ldct_tmp[\"breakpoint\"]) # apaga este elemento del ldct_tmp[\"breakpoint\"] # atualiza", "parse do elemento ldct_tmp = parser.parse_aproximacao(l_element) # atualiza o dicionário com o breakpoint", "assert fdct_root is not None assert fdct_data is not None # é uma", "de aproximação a partir de um dicionário @param fdct_data: lista de dados de", "# senão, não existe identificação else: # monta uma mensagem ls_msg = \"não", "partir de um dicionário self.make_apx(ldct_root, ldct_data) # --------------------------------------------------------------------------------------------- def save2disk(self, fs_apx_pn=None): \"\"\" salva", "if \"breakpoint\" in ldct_tmp: # atualiza o dicionário com o breakpoint ldct_data[\"breakpoints\"].append(ldct_tmp[\"breakpoint\"]) #", "save2disk(self, fs_apx_pn=None): \"\"\" salva os dados da procedimento de aproximação em um arquivo", "pass # self.copy_apx(f_data) # senão, recebeu o pathname de um arquivo de procedimento", "not None # faz o parse dos atributos do elemento raíz ldct_root =", "0.1 2014/nov mlabru initial release (Linux/Python) \"\"\" # < imports >-------------------------------------------------------------------------------------- # python", "de procedimento de aproximação a partir de um dicionário self.make_apx(ldct_root, ldct_data) # ---------------------------------------------------------------------------------------------", "return code lv_ok = True # mensagem ls_msg = \"save Ok\" # retorna", "carga do documento ? if not l_xdoc_apx.setContent(l_data_file): # fecha o arquivo l_data_file.close() #", "l_node is not None # percorre a sub-árvore while not l_node.isNull(): # tenta", "recebeu o pathname de um arquivo de procedimento de aproximação else: # carrega", "# inicia a super class super(CApxData, self).__init__() # salva o model manager self._model", "de aproximação \"\"\" # check input assert f_model # inicia a super class", "check input assert fs_apx_pn # cria o QFile para o arquivo XML do", "l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E01: não é um arquivo de procedimentos de aproximação.\")", "logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E01: erro na abertura de {}.\".format(fs_apx_pn)) # cria um evento de", "onde salvar @return flag e mensagem \"\"\" # return code lv_ok = True", "# carrega os dados de procedimento de aproximação a partir de um dicionário", "as parser # control import control.events.events_basic as events # < class CApxData >-------------------------------------------------------------------------------", "? if not l_xdoc_apx.setContent(l_data_file): # fecha o arquivo l_data_file.close() # logger l_log =", "elemento raíz ldct_root = parser.parse_root_element(l_elem_root) # cria uma lista com os elementos de", "for, cai fora... sys.exit(1) # é a assinatura do newton ? 
if \"1961\"", "isinstance(f_data, CApxData): # copia o procedimento de aproximação pass # self.copy_apx(f_data) # senão,", "da procedimento de aproximação em um arquivo em disco @param fs_apx_pn: path name", "manager @param f_data: dados dos procedimentos de aproximação \"\"\" # check input assert", "aproximação else: # carrega o dicionário de procedimento de aproximação de um arquivo", "pathname de um arquivo de procedimento de aproximação else: # carrega o dicionário", "a sub-árvore while not l_node.isNull(): # tenta converter o nó em um elemento", "dos procedimentos de aproximação \"\"\" # check input assert f_model # inicia a", "= logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E02: não está em um formato aceito.\") # cria um", "assinatura correta.\") # cria um evento de quit l_evt = events.CQuit() assert l_evt", "@param fs_apx_pn: pathname do arquivo em disco \"\"\" # check input assert fs_apx_pn", "os dados da lista pass # self.make_apx(f_data) # recebeu um procedimento de aproximação", "# se não for, cai fora... return False, ls_msg # retorna Ok return", "percorre a sub-árvore while not l_node.isNull(): # tenta converter o nó em um", "de um arquivo em disco self.load_file(f_data) # --------------------------------------------------------------------------------------------- def load_file(self, fs_apx_pn): \"\"\" carrega", "procedimento de aproximação pass # self.copy_apx(f_data) # senão, recebeu o pathname de um", "# recebeu uma lista ? if isinstance(f_data, list): # cria um procedimento de", "o dicionário de procedimento de aproximação de um arquivo em disco self.load_file(f_data) #", "formato aceito.\") # cria um evento de quit l_evt = events.CQuit() assert l_evt", "na lista... for li_ndx in range(l_node_list.length()): # inicia o dicionário de dados ldct_data", "não está em um formato aceito.\") # cria um evento de quit l_evt", "import model.items.parser_utils as parser # control import control.events.events_basic as events # < class", "is not None # abre o arquivo XML do procedimentos de aproximação l_data_file.open(QtCore.QIODevice.ReadOnly)", "na abertura do arquivo ? if not l_data_file.isOpen(): # logger l_log = logging.getLogger(\"CApxData::make_apx\")", "model.CApxNEW(self._model, fdct_data, fdct_root[\"VERSION\"]) assert l_apx # coloca a procedimento de aproximação no dicionário", "dissemina o evento self._event.post(l_evt) # termina a aplicação sys.exit(1) # cria o documento", "logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E02: não está em um formato aceito.\") # cria um evento", "evento self._event.post(l_evt) # se não for, cai fora... sys.exit(1) # verifica se existe", "# carrega o arquivo de procedimento de aproximação self.parse_apx_xml(fs_apx_pn + \".xml\") # ---------------------------------------------------------------------------------------------", "procedimento de aproximação de um arquivo em disco self.load_file(f_data) # --------------------------------------------------------------------------------------------- def load_file(self,", "l_log.critical(\"<E01: não é um arquivo de procedimentos de aproximação.\") # cria um evento", "se não for, cai fora... sys.exit(1) # verifica se existe identificação if \"nApx\"", "a aplicação sys.exit(1) # cria o documento XML do procedimento de aproximação #", "do newton ? 
if \"1961\" != fdct_root[\"CODE\"]: # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL)", "# read identification if available if l_element.hasAttribute(\"nApx\"): ldct_data[\"nApx\"] = int(l_element.attribute(\"nApx\")) # obtém o", "mensagem \"\"\" # check input assert fdct_root is not None assert fdct_data is", "procedimento de aproximação do newton ? if \"aproximacoes\" != fdct_root[\"tagName\"]: # logger l_log", "do newton ? if \"NEWTON\" != fdct_root[\"FORMAT\"]: # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL)", "cria o documento XML do procedimento de aproximação # FIXME QtXml is no", "not l_xdoc_apx.setContent(l_data_file): # fecha o arquivo l_data_file.close() # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL)", "# --------------------------------------------------------------------------------------------- def __init__(self, f_model, f_data=None): \"\"\" @param f_model: model manager @param f_data:", "l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E01: erro na abertura de {}.\".format(fs_apx_pn)) # cria um", "# model import model.items.apx_new as model import model.items.parser_utils as parser # control import", "= f_model.event # recebeu dados ? if f_data is not None: # recebeu", "= l_node_list.at(li_ndx).toElement() assert l_element is not None # read identification if available if", "else: # carrega o dicionário de procedimento de aproximação de um arquivo em", "aproximação self.parse_apx_xml(fs_apx_pn + \".xml\") # --------------------------------------------------------------------------------------------- def make_apx(self, fdct_root, fdct_data): \"\"\" carrega os", "None # abre o arquivo XML do procedimentos de aproximação l_data_file.open(QtCore.QIODevice.ReadOnly) # erro", "= events.CQuit() assert l_evt # dissemina o evento self._event.post(l_evt) # termina a aplicação", "parser.parse_aproximacao(l_element) # atualiza o dicionário com o breakpoint if \"breakpoint\" in ldct_tmp: #", "is no longer supported. l_xdoc_apx = QtXml.QDomDocument(\"aproximacoes\") assert l_xdoc_apx is not None #", "l_elem_root.elementsByTagName(\"aproximacao\") # para todos os nós na lista... for li_ndx in range(l_node_list.length()): #", "o dicionário com o breakpoint if \"breakpoint\" in ldct_tmp: # atualiza o dicionário", "o dicionário de procedimento de aproximação revision 0.2 2015/nov mlabru pep8 style conventions", "None: # recebeu uma lista ? if isinstance(f_data, list): # cria um procedimento", "input assert fs_apx_pn # carrega o arquivo de procedimento de aproximação self.parse_apx_xml(fs_apx_pn +", "obtém um nó da lista l_element = l_node_list.at(li_ndx).toElement() assert l_element is not None", "cria o QFile para o arquivo XML do procedimentos de aproximação l_data_file =", "in fdct_data: # cria procedimento de aproximação l_apx = model.CApxNEW(self._model, fdct_data, fdct_root[\"VERSION\"]) assert", "# é um arquivo do newton ? 
if \"NEWTON\" != fdct_root[\"FORMAT\"]: # logger", "lista l_element = l_node_list.at(li_ndx).toElement() assert l_element is not None # read identification if", "do documento l_elem_root = l_xdoc_apx.documentElement() assert l_elem_root is not None # faz o", "dados da procedimento de aproximação em um arquivo em disco @param fs_apx_pn: path", "initial release (Linux/Python) \"\"\" # < imports >-------------------------------------------------------------------------------------- # python library import logging", "super class super(CApxData, self).__init__() # salva o model manager self._model = f_model #", "\"\"\" carrega o arquivo de procedimentos de aproximação @param fs_apx_pn: pathname do arquivo", "assert l_apx # coloca a procedimento de aproximação no dicionário self[fdct_data[\"nApx\"]] = l_apx", "# recebeu um procedimento de aproximação ? elif isinstance(f_data, CApxData): # copia o", "import model.items.apx_new as model import model.items.parser_utils as parser # control import control.events.events_basic as", "em um arquivo em disco @param fs_apx_pn: path name do arquivo onde salvar", "salvar @return flag e mensagem \"\"\" # return code lv_ok = True #", "a partir de um dicionário @param fdct_data: lista de dados de procedimento de", "l_log.critical(\"<E01: erro na abertura de {}.\".format(fs_apx_pn)) # cria um evento de quit l_evt", "self).__init__() # salva o model manager self._model = f_model # salva o event", "manager self._model = f_model # salva o event manager self._event = f_model.event #", "PyQt5 import QtCore # FIXME QtXml is no longer supported. from PyQt5 import", "model manager self._model = f_model # salva o event manager self._event = f_model.event", "tem a assinatura correta.\") # cria um evento de quit l_evt = events.CQuit()", "as informações sobre o dicionário de procedimento de aproximação revision 0.2 2015/nov mlabru", "pathname do arquivo em disco \"\"\" # check input assert fs_apx_pn # carrega", "fora... sys.exit(1) # é a assinatura do newton ? if \"1961\" != fdct_root[\"CODE\"]:", "l_data_file is not None # abre o arquivo XML do procedimentos de aproximação", "l_data_file = QtCore.QFile(fs_apx_pn) assert l_data_file is not None # abre o arquivo XML", "not l_element.isNull(): # faz o parse do elemento ldct_tmp = parser.parse_aproximacao(l_element) # atualiza", "de aproximação <aproximacao nApx=\"1\"> <descricao>FINAL H3</descricao> <aerodromo>SBSP</aerodromo> <pista>17R</pista> <ils>N</ils> <aproxperd>N</aproxperd> <espera>2</espera> <breakpoint nBrk=\"1\">", "mlabru initial release (Linux/Python) \"\"\" # < imports >-------------------------------------------------------------------------------------- # python library import", "o documento XML do procedimento de aproximação # FIXME QtXml is no longer", "procedimentos de aproximação l_data_file.open(QtCore.QIODevice.ReadOnly) # erro na abertura do arquivo ? if not", "not None # percorre a sub-árvore while not l_node.isNull(): # tenta converter o", "import QtCore # FIXME QtXml is no longer supported. 
from PyQt5 import QtXml", "True # mensagem ls_msg = \"save Ok\" # retorna flag e mensagem return", "um arquivo de procedimentos de aproximação.\") # cria um evento de quit l_evt", "= logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E01: erro na abertura de {}.\".format(fs_apx_pn)) # cria um evento", "aproximação pass # self.copy_apx(f_data) # senão, recebeu o pathname de um arquivo de", "carrega os dados de procedimento de aproximação a partir de um dicionário @param", "# abre o arquivo XML do procedimentos de aproximação l_data_file.open(QtCore.QIODevice.ReadOnly) # erro na", "events.CQuit() assert l_evt # dissemina o evento self._event.post(l_evt) # se não for, cai", "raíz ldct_root = parser.parse_root_element(l_elem_root) # cria uma lista com os elementos de procedimento", "self.copy_apx(f_data) # senão, recebeu o pathname de um arquivo de procedimento de aproximação", "disco \"\"\" # check input assert fs_apx_pn # cria o QFile para o", "return True, None # --------------------------------------------------------------------------------------------- def parse_apx_xml(self, fs_apx_pn): \"\"\" carrega o arquivo de", "# < imports >-------------------------------------------------------------------------------------- # python library import logging import sys # PyQt", "\"breakpoint\" in ldct_tmp: # atualiza o dicionário com o breakpoint ldct_data[\"breakpoints\"].append(ldct_tmp[\"breakpoint\"]) # apaga", "? if f_data is not None: # recebeu uma lista ? if isinstance(f_data,", "a super class super(CApxData, self).__init__() # salva o model manager self._model = f_model", "logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E02: falha no parse de {}.\".format(fs_apx_pn)) # cria", "arquivo de procedimento de aproximação else: # carrega o dicionário de procedimento de", "verifica se existe identificação if \"nApx\" in fdct_data: # cria procedimento de aproximação", "l_apx = model.CApxNEW(self._model, fdct_data, fdct_root[\"VERSION\"]) assert l_apx # coloca a procedimento de aproximação", "def save2disk(self, fs_apx_pn=None): \"\"\" salva os dados da procedimento de aproximação em um", "name do arquivo onde salvar @return flag e mensagem \"\"\" # return code", "# logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.WARNING) l_log.warning(\"<E04: {}\".format(ls_msg)) # se não for, cai", "Ok\" # retorna flag e mensagem return lv_ok, ls_msg # < the end", "nó l_node = l_node.nextSibling() assert l_node is not None # carrega os dados", "assert l_data_file is not None # abre o arquivo XML do procedimentos de", "# cria procedimento de aproximação l_apx = model.CApxNEW(self._model, fdct_data, fdct_root[\"VERSION\"]) assert l_apx #", "python library import logging import sys # PyQt library from PyQt5 import QtCore", "arquivo de procedimento de aproximação self.parse_apx_xml(fs_apx_pn + \".xml\") # --------------------------------------------------------------------------------------------- def make_apx(self, fdct_root,", "longer supported. 
from PyQt5 import QtXml # model import model.items.apx_new as model import", "de {}.\".format(fs_apx_pn)) # cria um evento de quit l_evt = events.CQuit() assert l_evt", "assert l_element is not None # read identification if available if l_element.hasAttribute(\"nApx\"): ldct_data[\"nApx\"]", "um procedimento de aproximação com os dados da lista pass # self.make_apx(f_data) #", "aproximação de um arquivo em disco self.load_file(f_data) # --------------------------------------------------------------------------------------------- def load_file(self, fs_apx_pn): \"\"\"", "in range(l_node_list.length()): # inicia o dicionário de dados ldct_data = {} # inicia", "l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E03: não tem a assinatura correta.\") # cria um", "if not l_element.isNull(): # faz o parse do elemento ldct_tmp = parser.parse_aproximacao(l_element) #", "elemento ldct_tmp = parser.parse_aproximacao(l_element) # atualiza o dicionário com o breakpoint if \"breakpoint\"", "uma mensagem ls_msg = \"não tem identificação. Aproximação não incluída.\" # logger l_log", "de um arquivo em disco @param fs_apx_pn: pathname do arquivo em disco \"\"\"", "</aproximacao> \"\"\" # --------------------------------------------------------------------------------------------- def __init__(self, f_model, f_data=None): \"\"\" @param f_model: model manager", "f_model: model manager @param f_data: dados dos procedimentos de aproximação \"\"\" # check", "# atualiza o dicionário de dados ldct_data.update(ldct_tmp) # próximo nó l_node = l_node.nextSibling()", "fora... return False, ls_msg # retorna Ok return True, None # --------------------------------------------------------------------------------------------- def", "o evento self._event.post(l_evt) # termina a aplicação sys.exit(1) # cria o documento XML", "salva o model manager self._model = f_model # salva o event manager self._event", "os dados do procedimento de aproximação de um arquivo em disco @param fs_apx_pn:", "\"\"\" # < imports >-------------------------------------------------------------------------------------- # python library import logging import sys #", "fora... sys.exit(1) # verifica se existe identificação if \"nApx\" in fdct_data: # cria", "na carga do documento ? if not l_xdoc_apx.setContent(l_data_file): # fecha o arquivo l_data_file.close()", "= QtXml.QDomDocument(\"aproximacoes\") assert l_xdoc_apx is not None # erro na carga do documento", "l_element = l_node_list.at(li_ndx).toElement() assert l_element is not None # read identification if available", "o nó é um elemento ? if not l_element.isNull(): # faz o parse", "is not None assert fdct_data is not None # é uma procedimento de", "o breakpoint ldct_data[\"breakpoints\"].append(ldct_tmp[\"breakpoint\"]) # apaga este elemento del ldct_tmp[\"breakpoint\"] # atualiza o dicionário", "self._event.post(l_evt) # termina a aplicação sys.exit(1) # cria o documento XML do procedimento", "# obtém o primeiro nó da sub-árvore l_node = l_element.firstChild() assert l_node is", "\".xml\") # --------------------------------------------------------------------------------------------- def make_apx(self, fdct_root, fdct_data): \"\"\" carrega os dados de procedimento", "se não for, cai fora... 
sys.exit(1) # é um arquivo do newton ?", "arquivo em disco \"\"\" # check input assert fs_apx_pn # cria o QFile", "check input assert f_model # inicia a super class super(CApxData, self).__init__() # salva", "# salva o event manager self._event = f_model.event # recebeu dados ? if", "control.events.events_basic as events # < class CApxData >------------------------------------------------------------------------------- class CApxData(dict): \"\"\" mantém as", "\"\"\" # check input assert f_model # inicia a super class super(CApxData, self).__init__()", "<espera>2</espera> <breakpoint nBrk=\"1\"> ... </breakpoint> </aproximacao> \"\"\" # --------------------------------------------------------------------------------------------- def __init__(self, f_model, f_data=None):", "de dados de procedimento de aproximação @return flag e mensagem \"\"\" # check", "-*- coding: utf-8 -*- \"\"\" apx_data mantém as informações sobre o dicionário de", "ldct_root = parser.parse_root_element(l_elem_root) # cria uma lista com os elementos de procedimento de", "pep8 style conventions revision 0.1 2014/nov mlabru initial release (Linux/Python) \"\"\" # <", "PyQt5 import QtXml # model import model.items.apx_new as model import model.items.parser_utils as parser", "procedimento de aproximação <aproximacao nApx=\"1\"> <descricao>FINAL H3</descricao> <aerodromo>SBSP</aerodromo> <pista>17R</pista> <ils>N</ils> <aproxperd>N</aproxperd> <espera>2</espera> <breakpoint", "identificação. Aproximação não incluída.\" # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.WARNING) l_log.warning(\"<E04: {}\".format(ls_msg)) #", "# --------------------------------------------------------------------------------------------- def save2disk(self, fs_apx_pn=None): \"\"\" salva os dados da procedimento de aproximação", "de aproximação no dicionário self[fdct_data[\"nApx\"]] = l_apx # senão, não existe identificação else:", "int(l_element.attribute(\"nApx\")) # obtém o primeiro nó da sub-árvore l_node = l_element.firstChild() assert l_node", "sys # PyQt library from PyQt5 import QtCore # FIXME QtXml is no", "quit l_evt = events.CQuit() assert l_evt # dissemina o evento self._event.post(l_evt) # se", "<aproxperd>N</aproxperd> <espera>2</espera> <breakpoint nBrk=\"1\"> ... </breakpoint> </aproximacao> \"\"\" # --------------------------------------------------------------------------------------------- def __init__(self, f_model,", "library from PyQt5 import QtCore # FIXME QtXml is no longer supported. 
from", "de quit l_evt = events.CQuit() assert l_evt # dissemina o evento self._event.post(l_evt) #", "nó em um elemento l_element = l_node.toElement() assert l_element is not None #", "o evento self._event.post(l_evt) # termina a aplicação sys.exit(1) # fecha o arquivo l_data_file.close()", "os dados da procedimento de aproximação em um arquivo em disco @param fs_apx_pn:", "CApxData(dict): \"\"\" mantém as informações sobre o dicionário de procedimento de aproximação <aproximacao", "dissemina o evento self._event.post(l_evt) # termina a aplicação sys.exit(1) # fecha o arquivo", "\"\"\" carrega os dados de procedimento de aproximação a partir de um dicionário", "--------------------------------------------------------------------------------------------- def parse_apx_xml(self, fs_apx_pn): \"\"\" carrega o arquivo de procedimentos de aproximação @param", "l_element.hasAttribute(\"nApx\"): ldct_data[\"nApx\"] = int(l_element.attribute(\"nApx\")) # obtém o primeiro nó da sub-árvore l_node =", "de aproximação de um arquivo em disco @param fs_apx_pn: pathname do arquivo em", "Aproximação não incluída.\" # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.WARNING) l_log.warning(\"<E04: {}\".format(ls_msg)) # se", "? if \"1961\" != fdct_root[\"CODE\"]: # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E03: não", "for, cai fora... sys.exit(1) # é um arquivo do newton ? if \"NEWTON\"", "l_xdoc_apx.setContent(l_data_file): # fecha o arquivo l_data_file.close() # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E02:", "existe identificação else: # monta uma mensagem ls_msg = \"não tem identificação. Aproximação", "None # percorre a sub-árvore while not l_node.isNull(): # tenta converter o nó", "pathname do arquivo em disco \"\"\" # check input assert fs_apx_pn # cria", "f_data is not None: # recebeu uma lista ? if isinstance(f_data, list): #", "termina a aplicação sys.exit(1) # cria o documento XML do procedimento de aproximação", "# tenta converter o nó em um elemento l_element = l_node.toElement() assert l_element", "# check input assert f_model # inicia a super class super(CApxData, self).__init__() #", "logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E02: não está em um formato aceito.\") #", "l_apx # coloca a procedimento de aproximação no dicionário self[fdct_data[\"nApx\"]] = l_apx #", "obtém o elemento raíz do documento l_elem_root = l_xdoc_apx.documentElement() assert l_elem_root is not", "dados da lista pass # self.make_apx(f_data) # recebeu um procedimento de aproximação ?", "l_evt = events.CQuit() assert l_evt # dissemina o evento self._event.post(l_evt) # se não", "not None # read identification if available if l_element.hasAttribute(\"nApx\"): ldct_data[\"nApx\"] = int(l_element.attribute(\"nApx\")) #", "fdct_root[\"FORMAT\"]: # logger l_log = logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E02: não está em um formato", "o primeiro nó da sub-árvore l_node = l_element.firstChild() assert l_node is not None", "= logging.getLogger(\"CApxData::make_apx\") l_log.setLevel(logging.CRITICAL) l_log.critical(\"<E01: não é um arquivo de procedimentos de aproximação.\") #", "assinatura do newton ? 
if \"1961\" != fdct_root[\"CODE\"]: # logger l_log = logging.getLogger(\"CApxData::make_apx\")", "@param f_data: dados dos procedimentos de aproximação \"\"\" # check input assert f_model", "QtCore # FIXME QtXml is no longer supported. from PyQt5 import QtXml #", "is not None # erro na carga do documento ? if not l_xdoc_apx.setContent(l_data_file):", "l_element.firstChild() assert l_node is not None # percorre a sub-árvore while not l_node.isNull():", "None # o nó é um elemento ? if not l_element.isNull(): # faz", "da lista l_element = l_node_list.at(li_ndx).toElement() assert l_element is not None # read identification", "em disco \"\"\" # check input assert fs_apx_pn # cria o QFile para", "= f_model # salva o event manager self._event = f_model.event # recebeu dados", "carrega os dados do procedimento de aproximação de um arquivo em disco @param", "cria um evento de quit l_evt = events.CQuit() assert l_evt # dissemina o", "# se não for, cai fora... sys.exit(1) # verifica se existe identificação if", "de dados ldct_data.update(ldct_tmp) # próximo nó l_node = l_node.nextSibling() assert l_node is not", "de dados ldct_data = {} # inicia a lista de breakpoints ldct_data[\"breakpoints\"] =", "__init__(self, f_model, f_data=None): \"\"\" @param f_model: model manager @param f_data: dados dos procedimentos", "aproximação \"\"\" # check input assert f_model # inicia a super class super(CApxData,", "# carrega o dicionário de procedimento de aproximação de um arquivo em disco", "longer supported. l_xdoc_apx = QtXml.QDomDocument(\"aproximacoes\") assert l_xdoc_apx is not None # erro na", "o evento self._event.post(l_evt) # se não for, cai fora... sys.exit(1) # é um", "\"\"\" # check input assert fdct_root is not None assert fdct_data is not", "l_elem_root = l_xdoc_apx.documentElement() assert l_elem_root is not None # faz o parse dos", "def make_apx(self, fdct_root, fdct_data): \"\"\" carrega os dados de procedimento de aproximação a", "l_element = l_node.toElement() assert l_element is not None # o nó é um", "self._event.post(l_evt) # se não for, cai fora... sys.exit(1) # verifica se existe identificação", "self._model = f_model # salva o event manager self._event = f_model.event # recebeu", "partir de um dicionário @param fdct_data: lista de dados de procedimento de aproximação", "nBrk=\"1\"> ... </breakpoint> </aproximacao> \"\"\" # --------------------------------------------------------------------------------------------- def __init__(self, f_model, f_data=None): \"\"\" @param", "a assinatura correta.\") # cria um evento de quit l_evt = events.CQuit() assert", "# obtém o elemento raíz do documento l_elem_root = l_xdoc_apx.documentElement() assert l_elem_root is", "not l_node.isNull(): # tenta converter o nó em um elemento l_element = l_node.toElement()", "\"nApx\" in fdct_data: # cria procedimento de aproximação l_apx = model.CApxNEW(self._model, fdct_data, fdct_root[\"VERSION\"])", "parser.parse_root_element(l_elem_root) # cria uma lista com os elementos de procedimento de aproximação l_node_list" ]
[ "get currents state current_box_state = box_state_trace[i - 1, :] current_finger1_state = finger1_state_trace[i -", "box and the ground. args: box_state: current (x, z, theta, vx, vz, omega)", "box_mass_kg) finger1_state_dot = finger1_state_dot.at[2:].add(finger1_forces / finger_mass_kg) finger2_state_dot = finger2_state_dot.at[2:].add(finger2_forces / finger_mass_kg) # Torques", "finger finger1_state_desired: desired (x_d, z_d) state of the first finger finger2_state: current (x,", "[jnp.sin(theta_B), jnp.cos(theta_B)]] ) R_BW = R_WB.T p_BF = R_BW @ p_BF_W # Now", "finger1_forces + finger_control_stiffness * finger1_pos_error finger1_forces = finger1_forces + finger_control_damping * finger1_vel_error finger2_pos_error", "velocity tangential_velocity = ( v_contactF_B - v_contactF_B.dot(normal) * normal ) # relative velocity", "on each body finger1_forces = finger1_forces.at[1].add(-g * finger_mass_kg) finger2_forces = finger2_forces.at[1].add(-g * finger_mass_kg)", "properties g = 9.81 dt = 0.001 # seconds per step ###################################### #", "jax.numpy as jnp import jax @jax.jit def box_finger_signed_distance(box_pose, finger_pose, box_size): \"\"\"Compute the signed", "R_WB = rotation_matrix(box_state[2]) p_BF = R_WB.T @ p_BF_W # Get velocity of the", "jnp.array([x_dist, z_dist])) ) return phi @jax.jit def rotation_matrix(theta): \"\"\"Return the 2D rotation matrix", "jnp.logical_and(jnp.logical_not(right_or_up), left_or_up) normal_down = jnp.logical_and( jnp.logical_not(right_or_up), jnp.logical_not(left_or_up) ) normal = normal_right * jnp.array([1.0,", "box_wrench.at[:2].add(-contact_force_W) box_wrench = box_wrench.at[2].add(jnp.cross(p_BF_W, -contact_force_W)) finger_forces = contact_force_W return box_wrench, finger_forces @jax.jit def", "jnp.minimum(jnp.zeros(1), p_WF[1]) # Get the contact forces. Approximate ground force as a damped", "state of the second finger finger2_state_desired: desired (x_d, z_d) state of the second", "new_finger2_state def box_two_finger_simulate( box_state_initial, finger1_state_initial, finger1_state_desired_trace, finger2_state_initial, finger2_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate the", "p_WB = box_state[:2] p_BF_W = p_WF - p_WB R_WB = rotation_matrix(box_state[2]) p_BF =", "contact_d: damping constant of contact returns: contact force in x and z \"\"\"", "# phi = signed distance. phi = jnp.minimum(0.0, jnp.maximum(x_dist, z_dist)) phi = phi", "finger_pose: current (x, z) state of the finger box_size: side length of box", "finger finger2_state: current (x, z, vx, vz) state of the second finger finger2_state_desired:", "z = 0 half_size = box_size / 2.0 p_BC = jnp.array( [ [-half_size,", "up the forces and torques due to penetration with the ground contact_wrench_on_box =", "- box_size / 2.0) # phi = signed distance. phi = jnp.minimum(0.0, jnp.maximum(x_dist,", "+ 1e-3) # Get signed distance phi_finger_box = box_finger_signed_distance( box_state[:3], finger_state[:2], box_size )", "contact_k, contact_d): \"\"\"Compute the contact force between a finger and the ground. args:", "= (R_WB @ p_BC.T).T p_WC = p_BC_W + jnp.tile(box_state[:2], [4, 1]) # Also", "mu_d / c contact_k = 1000 contact_d = 2 * jnp.sqrt(box_mass_kg * contact_k)", "contact_d): \"\"\"Compute the contact wrench between the box and the ground. 
args: box_state:", "mu_d, c, psi_s, contact_k, contact_d ) # Contact forces between box and finger", "psi_s slipping_mask = jnp.logical_not(sticking_mask) mu = sticking_mask * c * tangential_velocity + slipping_mask", "# Save box_state_trace = box_state_trace.at[i, :].set(next_box_state) finger_state_trace = finger_state_trace.at[i, :].set(next_finger_state) # Return the", "1, :] current_finger_state = finger_state_trace[i - 1, :] current_finger_state_desired = finger_state_desired_trace[i - 1]", "finger, starting at the given initial states and applying the specified control inputs", "in the box frame p_WF = finger_state[:2] p_WB = box_state[:2] p_BF_W = p_WF", "properties finger_mass_kg = 0.1 finger_control_damping = 2 # Contact properties mu_d = 0.7", "mu_d: coefficient of friction between box and ground while slipping c: coefficient of", "box finger1_state_initial: initial (x, z, vx, vz) state of the finger finger1_state_desired_trace: N_steps", "code: # stackoverflow.com/questions/30545052/calculate-signed-distance-between-point-and- # rectangle # First transform the finger (x, z) into", ") ) # corner point velocities in box frame # Transform to world", "each body ###################################### finger_forces = jnp.zeros(2) box_forces = jnp.zeros(3) # Gravitational force on", "frame R_WB = rotation_matrix(box_state[2]) p_BC_W = (R_WB @ p_BC.T).T p_WC = p_BC_W +", "normal vector of the contact in the box frame right_or_up = p_BF[1] >", "# Gravitational force on each body finger_forces = finger_forces.at[1].add(-g * finger_mass_kg) box_forces =", "Credit to this stackoverflow answer for the inspiration for this code: # stackoverflow.com/questions/30545052/calculate-signed-distance-between-point-and-", "of box returns: float signed distance \"\"\" # Credit to this stackoverflow answer", "return contact_force @jax.jit def calc_box_ground_wrench(box_state, box_size, mu_d, c, psi_s, contact_k, contact_d): \"\"\"Compute the", "store simulation traces box_state_trace = jnp.zeros((N_steps, 6)) finger_state_trace = jnp.zeros((N_steps, 4)) # Store", "psi_s, contact_k, contact_d ) finger_forces += calc_finger_ground_force( finger_state, mu_d, c, psi_s, contact_k, contact_d", "4)) finger2_state_trace = jnp.zeros((N_steps, 4)) # Store the initial conditions box_state_trace = box_state_trace.at[0,", "the torque from this interaction contact_wrench_on_box = contact_wrench_on_box.at[2].add( jnp.cross(p_BC_W[i, :], contact_force) ) return", "current_box_state, current_finger_state, current_finger_state_desired, finger_control_stiffness, ) # Save box_state_trace = box_state_trace.at[i, :].set(next_box_state) finger_state_trace =", "desired (x_d, z_d) state of the finger over time finger2_state_initial: initial (x, z,", "force as a damped spring, as in # the simplified friction model from", "contact contact_d: damping constant of contact returns: Tuple of - contact wrench on", "calc_box_finger_wrench( box_state, finger_state, box_size, mu_d, c, psi_s, contact_k, contact_d ): \"\"\"Compute the contact", "calc_box_finger_wrench( box_state, finger_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) box_forces += finger_wrench_on_box", "seconds per step ###################################### # Get forces on each body ###################################### finger1_forces =", "finger2_pos_error finger2_forces = finger2_forces + finger_control_damping * finger2_vel_error # Contact forces from ground.", "+ finger_control_stiffness * finger_pos_error finger_forces = finger_forces + 
finger_control_damping * finger_vel_error # Contact", "velocities in box frame # Transform to world frame v_WC = (R_WB @", "box_force_on_finger ###################################### # Numerically integrate ###################################### # Build the derivatives matrix box_state_dot =", "# https://arxiv.org/pdf/2109.05143.pdf, but with damping. normal_velocity = v_WC[i, 1] normal_force = -contact_k *", "= v_WC[i, 0] sticking_mask = jnp.abs(tangential_velocity) <= psi_s slipping_mask = jnp.logical_not(sticking_mask) * jnp.sign(tangential_velocity)", "# Get the normal vector of the contact in the box frame right_or_up", "1] current_finger2_state = finger2_state_trace[i - 1, :] current_finger2_state_desired = finger2_state_desired_trace[i - 1] #", "on each body ###################################### finger1_forces = jnp.zeros(2) finger2_forces = jnp.zeros(2) box_forces = jnp.zeros(3)", "current_finger1_state, current_finger1_state_desired, current_finger2_state, current_finger2_state_desired, finger_control_stiffness, ) # Save box_state_trace = box_state_trace.at[i, :].set(next_box_state) finger1_state_trace", "= finger_forces + finger_control_damping * finger_vel_error # Contact forces from the ground. box_forces", "= jnp.logical_not(sticking_mask) mu = sticking_mask * c * tangential_velocity + slipping_mask * mu_d", "jnp.zeros(2) finger2_forces = jnp.zeros(2) box_forces = jnp.zeros(3) # Gravitational force on each body", "= -finger1_state[2:] finger1_forces = finger1_forces + finger_control_stiffness * finger1_pos_error finger1_forces = finger1_forces +", "left [-1, 1], # top right [1, -1], # bottom left [1, 1],", "currents state current_box_state = box_state_trace[i - 1, :] current_finger1_state = finger1_state_trace[i - 1,", "p_WB # Rotate p_BF_W by -theta about the z axis to get position", "current_finger_state_desired, finger_control_stiffness, ) # Save box_state_trace = box_state_trace.at[i, :].set(next_box_state) finger_state_trace = finger_state_trace.at[i, :].set(next_finger_state)", "by -theta about the z axis to get position in box frame R_WB", "vector # and points in the same direction as the relative velocity tangential_velocity", "box_state_initial: initial (x, z, theta, vx, vz, omega) state of the box finger1_state_initial:", "c = 2.0 psi_s = mu_d / c contact_k = 1000 contact_d =", "initial (x, z, vx, vz) state of the finger finger2_state_desired_trace: N_steps x 2", "as the relative velocity tangential_velocity = ( v_contactF_B - v_contactF_B.dot(normal) * normal )", "jnp.sign(tangential_velocity) mu = sticking_mask * c * tangential_velocity + slipping_mask * mu_d tangent_force", "returns: float signed distance \"\"\" # Credit to this stackoverflow answer for the", "= finger_state_trace[i - 1, :] current_finger_state_desired = finger_state_desired_trace[i - 1] # get next", "Forces box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg) finger1_state_dot = finger1_state_dot.at[2:].add(finger1_forces / finger_mass_kg) finger2_state_dot =", "= box_state + dt * box_state_dot new_finger1_state = finger1_state + dt * finger1_state_dot", "the friction force to the box contact_wrench_on_box = contact_wrench_on_box.at[:2].add(contact_force) # Also add the", "current_finger2_state_desired, finger_control_stiffness, ) # Save box_state_trace = box_state_trace.at[i, :].set(next_box_state) finger1_state_trace = finger1_state_trace.at[i, :].set(next_finger1_state)", "tangent vector, which is orthogonal to the normal vector # and points in", "= finger1_forces + 
finger_control_damping * finger1_vel_error finger2_pos_error = finger2_state_desired - finger2_state[:2] finger2_vel_error =", "\"\"\"Compute the contact wrench between the box and the ground. args: box_state: current", "into the world frame contact_force_W = R_WB @ contact_force_B # Add the contact", "jnp.minimum(0.0, jnp.maximum(x_dist, z_dist)) phi = phi + jnp.linalg.norm( jnp.maximum(jnp.array([1e-3, 1e-3]), jnp.array([x_dist, z_dist])) )", "# Contact forces from the ground. box_forces += calc_box_ground_wrench( box_state, box_side_m, mu_d, c,", "(x_d, z_d) state of the finger over time finger_control_stiffness: the parameter for the", "contact_force_W return box_wrench, finger_forces @jax.jit def box_single_finger_step( box_state, finger_state, finger_state_desired, finger_control_stiffness, ): \"\"\"Compute", "finger_control_damping * finger2_vel_error # Contact forces from ground. box_forces += calc_box_ground_wrench( box_state, box_side_m,", "of the finger in box frame v_WF = finger_state[2:] v_WB = box_state[3:5] v_BF_W", "contact force between a finger and the ground. args: finger_state: current (x, z,", "psi_s slipping_mask = jnp.logical_not(sticking_mask) * jnp.sign(tangential_velocity) mu = sticking_mask * c * tangential_velocity", "- finger1_state[:2] finger1_vel_error = -finger1_state[2:] finger1_forces = finger1_forces + finger_control_stiffness * finger1_pos_error finger1_forces", "box_pose[2] p_BF_W = p_WF - p_WB # Rotate p_BF_W by -theta about the", "Get velocity of contact point in box frame v_Bcontact = box_state[5] * jnp.array([[0,", "finger2_vel_error = -finger2_state[2:] finger2_forces = finger2_forces + finger_control_stiffness * finger2_pos_error finger2_forces = finger2_forces", "= normal_force * normal + tangent_force # transform into the world frame contact_force_W", "contact_d: damping constant of contact returns: Tuple of - contact wrench on box", "= jnp.array( [[jnp.cos(theta_B), -jnp.sin(theta_B)], [jnp.sin(theta_B), jnp.cos(theta_B)]] ) R_BW = R_WB.T p_BF = R_BW", "right ] ) ) # corner point velocities in box frame # Transform", "normal vector tangent = tangential_velocity / (jnp.linalg.norm(tangential_velocity + 1e-3) + 1e-3) # Get", "0.001 # seconds per step ###################################### # Get forces on each body ######################################", "friction model args: box_state: current (x, z, theta, vx, vz, omega) state of", "scalar, in normal direction normal_force = normal_force - contact_d * normal_velocity * (phi_finger_box", "finger1_state, finger1_state_desired, finger2_state, finger2_state_desired, finger_control_stiffness, ): \"\"\"Compute a single discrete-time update for box", "tangential velocity where slipping begins contact_k: spring constant of contact contact_d: damping constant", "< 0) sticking_mask = jnp.linalg.norm(tangential_velocity + 1e-3) <= psi_s slipping_mask = jnp.logical_not(sticking_mask) mu", "the finger stiffness control N_steps: int specifying the number of discrete time steps", "into world frame R_WB = rotation_matrix(box_state[2]) p_BC_W = (R_WB @ p_BC.T).T p_WC =", "number of discrete time steps to simulate returns: box_state_trace, finger_state_trace \"\"\" # Create", "2.0 p_BC = jnp.array( [ [-half_size, half_size], # top left [half_size, half_size], #", "corner, sum up the forces and torques due to penetration with the ground", "ground contact_wrench_on_box = jnp.zeros(3) for i in range(4): # Get the friction force.", ") R_BW = R_WB.T p_BF = R_BW @ p_BF_W # Now get the", "jnp.logical_not(left_or_up) ) 
normal = normal_right * jnp.array([1.0, 0.0]) normal += normal_left * jnp.array([-1.0,", "to the box contact_wrench_on_box = contact_wrench_on_box.at[:2].add(contact_force) # Also add the torque from this", "jnp.minimum(jnp.zeros(4), p_WC[:, 1]) # For each corner, sum up the forces and torques", "Numerically integrate ###################################### # Build the derivatives matrix box_state_dot = jnp.zeros(6) finger1_state_dot =", ") finger_forces += calc_finger_ground_force( finger_state, mu_d, c, psi_s, contact_k, contact_d ) # Contact", "finger_control_stiffness, ) # Save box_state_trace = box_state_trace.at[i, :].set(next_box_state) finger1_state_trace = finger1_state_trace.at[i, :].set(next_finger1_state) finger2_state_trace", "+= finger2_wrench_on_box finger2_forces += box_force_on_finger2 ###################################### # Numerically integrate ###################################### # Build the", "finger_state_dot.at[2:].add(finger_forces / finger_mass_kg) # Torques box_state_dot = box_state_dot.at[5].add(box_forces[2] / box_inertia) # Itegrate new_box_state", "normal_force contact_force = jnp.array([tangent_force, normal_force]) # Add the friction force to the box", "box_size ) # Clip to only consider negative values phi_finger_box = jnp.minimum(0, phi_finger_box)", "the specified control inputs args: box_state_initial: initial (x, z, theta, vx, vz, omega)", "N_steps, ): \"\"\"Simulate the evolution of the box-finger system with one finger, starting", "finger2_state_initial, finger2_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate the evolution of the box-finger system with", "finger finger_state_desired_trace: N_steps x 2 array of desired (x_d, z_d) state of the", "contact point in box frame v_Bcontact = box_state[5] * jnp.array([[0, -1], [1, 0]])", "x and z. 
\"\"\" # Contact point is just the finger point in", "# Numerically integrate ###################################### # Build the derivatives matrix box_state_dot = jnp.zeros(6) finger_state_dot", "- 1] current_finger2_state = finger2_state_trace[i - 1, :] current_finger2_state_desired = finger2_state_desired_trace[i - 1]", "box_size, mu_d, c, psi_s, contact_k, contact_d ): \"\"\"Compute the contact wrench between the", "in the same direction as the relative velocity tangential_velocity = ( v_contactF_B -", ") # Save box_state_trace = box_state_trace.at[i, :].set(next_box_state) finger_state_trace = finger_state_trace.at[i, :].set(next_finger_state) # Return", "+= calc_finger_ground_force( finger_state, mu_d, c, psi_s, contact_k, contact_d ) # Contact forces between", "jnp.array([0.0, 1.0]) normal += normal_down * jnp.array([0.0, -1.0]) # Get the tangent vector,", "finger_state_dot return new_box_state, new_finger_state def box_single_finger_simulate( box_state_initial, finger_state_initial, finger_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate", "half_size], # top left [half_size, half_size], # top right [-half_size, -half_size], # bottom", "v_BC.T).T + jnp.tile(box_state[3:5], [4, 1]) # Find any that have negative z: min(0,", "phi = phi + jnp.linalg.norm( jnp.maximum(jnp.array([1e-3, 1e-3]), jnp.array([x_dist, z_dist])) ) return phi @jax.jit", "right ] ) # corner points in box frame # Transform into world", "= sticking_mask * c * tangential_velocity + slipping_mask * mu_d tangent_force = -mu", "frame v_WC = (R_WB @ v_BC.T).T + jnp.tile(box_state[3:5], [4, 1]) # Find any", "stackoverflow answer for the inspiration for this code: # stackoverflow.com/questions/30545052/calculate-signed-distance-between-point-and- # rectangle #", "/ box_mass_kg) finger_state_dot = finger_state_dot.at[2:].add(finger_forces / finger_mass_kg) # Torques box_state_dot = box_state_dot.at[5].add(box_forces[2] /", "(x_d, z_d) state of the second finger finger_control_stiffness: the parameter for the finger", "for angle theta\"\"\" return jnp.array( [[jnp.cos(theta), -jnp.sin(theta)], [jnp.sin(theta), jnp.cos(theta)]] ) @jax.jit def calc_finger_ground_force(finger_state,", "the tangent vector, which is orthogonal to the normal vector # and points", "constant of contact contact_d: damping constant of contact returns: contact wrench in x,", "z_dist])) ) return phi @jax.jit def rotation_matrix(theta): \"\"\"Return the 2D rotation matrix for", "= box_single_finger_step( current_box_state, current_finger_state, current_finger_state_desired, finger_control_stiffness, ) # Save box_state_trace = box_state_trace.at[i, :].set(next_box_state)", "new_box_state = box_state + dt * box_state_dot new_finger_state = finger_state + dt *", "box_mass_kg) finger_state_dot = finger_state_dot.at[2:].add(finger_forces / finger_mass_kg) # Torques box_state_dot = box_state_dot.at[5].add(box_forces[2] / box_inertia)", "and theta. - contact force on finger in x and z. \"\"\" #", "= finger_state_trace.at[i, :].set(next_finger_state) # Return the simulated values return box_state_trace, finger_state_trace @jax.jit def", "0.7 c = 2.0 psi_s = mu_d / c contact_k = 1000 contact_d", "contact_k, contact_d): \"\"\"Compute the contact wrench between the box and the ground. 
args:", "frame R_WB = jnp.array( [[jnp.cos(theta_B), -jnp.sin(theta_B)], [jnp.sin(theta_B), jnp.cos(theta_B)]] ) R_BW = R_WB.T p_BF", "* finger1_state_dot new_finger2_state = finger2_state + dt * finger2_state_dot return new_box_state, new_finger1_state, new_finger2_state", "initial conditions box_state_trace = box_state_trace.at[0, :].set(box_state_initial) finger_state_trace = finger_state_trace.at[0, :].set(finger_state_initial) # Simulate for", "/ 2.0 p_BC = jnp.array( [ [-half_size, half_size], # top left [half_size, half_size],", "normal_force = normal_force - contact_d * normal_velocity * (phi_finger_box < 0) sticking_mask =", "box contact_wrench_on_box = contact_wrench_on_box.at[:2].add(contact_force) # Also add the torque from this interaction contact_wrench_on_box", "= R_WB.T @ p_BF_W # Get velocity of the finger in box frame", "###################################### # Numerically integrate ###################################### # Build the derivatives matrix box_state_dot = jnp.zeros(6)", "jnp import jax @jax.jit def box_finger_signed_distance(box_pose, finger_pose, box_size): \"\"\"Compute the signed distance from", "the ground contact_wrench_on_box = jnp.zeros(3) for i in range(4): # Get the friction", "Clip to only consider negative values phi_finger_box = jnp.minimum(0, phi_finger_box) # Use the", "1] normal_force = -contact_k * phi_corner_ground[i] normal_force = normal_force - contact_d * normal_velocity", "= 2 # Contact properties mu_d = 0.7 c = 2.0 psi_s =", "= p_BF[1] > p_BF[0] normal_right = jnp.logical_and(right_or_up, jnp.logical_not(left_or_up)) normal_up = jnp.logical_and(right_or_up, left_or_up) normal_left", "* jnp.array([-1.0, 0.0]) normal += normal_up * jnp.array([0.0, 1.0]) normal += normal_down *", "# Itegrate new_box_state = box_state + dt * box_state_dot new_finger1_state = finger1_state +", "constant of contact returns: contact wrench in x, z, and theta. \"\"\" #", "to contact pt in box frame v_contactF_B = v_BF - v_Bcontact # Get", "psi_s, contact_k, contact_d): \"\"\"Compute the contact force between a finger and the ground.", "finger1_forces += box_force_on_finger1 finger2_wrench_on_box, box_force_on_finger2 = calc_box_finger_wrench( box_state, finger2_state, box_side_m, mu_d, c, psi_s,", "z, and theta. - contact force on finger in x and z. \"\"\"", "* normal_force contact_force = jnp.array([tangent_force, normal_force]).reshape(2) return contact_force @jax.jit def calc_box_ground_wrench(box_state, box_size, mu_d,", "1.0]) normal += normal_down * jnp.array([0.0, -1.0]) # Get the tangent vector, which", "new_box_state = box_state + dt * box_state_dot new_finger1_state = finger1_state + dt *", "jnp.zeros((N_steps, 6)) finger_state_trace = jnp.zeros((N_steps, 4)) # Store the initial conditions box_state_trace =", "the box finger_state_initial: initial (x, z, vx, vz) state of the finger finger_state_desired_trace:", "dt * finger_state_dot return new_box_state, new_finger_state def box_single_finger_simulate( box_state_initial, finger_state_initial, finger_state_desired_trace, finger_control_stiffness, N_steps,", "simplified friction model as used for ground contact normal_force = -contact_k * phi_finger_box", "/ finger_mass_kg) finger2_state_dot = finger2_state_dot.at[2:].add(finger2_forces / finger_mass_kg) # Torques box_state_dot = box_state_dot.at[5].add(box_forces[2] /", "* normal_force # vector! 
# Sum up the contact forces in the box", "to world frame v_WC = (R_WB @ v_BC.T).T + jnp.tile(box_state[3:5], [4, 1]) #", "the 2D rotation matrix for angle theta\"\"\" return jnp.array( [[jnp.cos(theta), -jnp.sin(theta)], [jnp.sin(theta), jnp.cos(theta)]]", "penalty method for contact modelling with a simplified Coulomb friction model args: box_state:", "normal ) # relative velocity in tangent direction normal_velocity = v_contactF_B.dot(normal) # scalar,", "finger1_state_dot = jnp.zeros(4) finger2_state_dot = jnp.zeros(4) # Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger1_state_dot =", "jnp.array([[0, -1], [1, 0]]) @ p_BF # Get velocity of finger relative to", "signed distance x_dist = jnp.maximum(-(p_BF[0] + box_size / 2.0), p_BF[0] - box_size /", "left_or_up = p_BF[1] > p_BF[0] normal_right = jnp.logical_and(right_or_up, jnp.logical_not(left_or_up)) normal_up = jnp.logical_and(right_or_up, left_or_up)", "store simulation traces box_state_trace = jnp.zeros((N_steps, 6)) finger1_state_trace = jnp.zeros((N_steps, 4)) finger2_state_trace =", "box_force_on_finger2 ###################################### # Numerically integrate ###################################### # Build the derivatives matrix box_state_dot =", "finger2_state: current (x, z, vx, vz) state of the second finger finger2_state_desired: desired", "current_finger1_state_desired = finger1_state_desired_trace[i - 1] current_finger2_state = finger2_state_trace[i - 1, :] current_finger2_state_desired =", "@jax.jit def calc_finger_ground_force(finger_state, mu_d, c, psi_s, contact_k, contact_d): \"\"\"Compute the contact force between", "left_or_up) normal_left = jnp.logical_and(jnp.logical_not(right_or_up), left_or_up) normal_down = jnp.logical_and( jnp.logical_not(right_or_up), jnp.logical_not(left_or_up) ) normal =", "state of the box finger1_state_initial: initial (x, z, vx, vz) state of the", "Get the contact forces. Approximate ground force as a damped spring, as in", "spring constant of contact contact_d: damping constant of contact returns: contact wrench in", "- p_WB R_WB = rotation_matrix(box_state[2]) p_BF = R_WB.T @ p_BF_W # Get velocity", "in x and z. \"\"\" # Contact point is just the finger point", "0.0]) normal += normal_up * jnp.array([0.0, 1.0]) normal += normal_down * jnp.array([0.0, -1.0])", "finger finger_control_stiffness: the parameter for the finger stiffness control returns: new_box_state, new_finger_state \"\"\"", "inspiration for this code: # stackoverflow.com/questions/30545052/calculate-signed-distance-between-point-and- # rectangle # First transform the finger", "slipping_mask * mu_d tangent_force = -mu * normal_force contact_force = jnp.array([tangent_force, normal_force]).reshape(2) return", "between box and ground while slipping c: coefficient of tangential velocity in determining", "box_forces = box_forces.at[1].add(-g * box_mass_kg) # Control forces on finger finger_pos_error = finger_state_desired", "-half_size], # bottom right ] ) # corner points in box frame #", "contact force on finger in x and z. 
\"\"\" # Contact point is", "jnp.zeros(2) box_forces = jnp.zeros(3) # Gravitational force on each body finger_forces = finger_forces.at[1].add(-g", "v_contactF_B.dot(normal) # scalar, along the normal vector tangent = tangential_velocity / (jnp.linalg.norm(tangential_velocity +", "find the velocities of each corner point r = jnp.sqrt(2) * half_size v_BC", "the position and velocity of the finger in the world frame p_WF =", "interaction contact_wrench_on_box = contact_wrench_on_box.at[2].add( jnp.cross(p_BC_W[i, :], contact_force) ) return contact_wrench_on_box @jax.jit def calc_box_finger_wrench(", "a simplified Coulomb friction model args: box_state: current (x, z, theta, vx, vz,", "Use the same simplified friction model as used for ground contact normal_force =", "[-1, -1], # top left [-1, 1], # top right [1, -1], #", "of the simulation ###################################### # Box properties box_mass_kg = 1.0 box_side_m = 0.5", "def rotation_matrix(theta): \"\"\"Return the 2D rotation matrix for angle theta\"\"\" return jnp.array( [[jnp.cos(theta),", "each corner, sum up the forces and torques due to penetration with the", "c: coefficient of tangential velocity in determining sticking friction psi_s: tangential velocity where", "jnp.logical_not(right_or_up), jnp.logical_not(left_or_up) ) normal = normal_right * jnp.array([1.0, 0.0]) normal += normal_left *", "box_size, mu_d, c, psi_s, contact_k, contact_d): \"\"\"Compute the contact wrench between the box", "finger_state_desired, finger_control_stiffness, ): \"\"\"Compute a single discrete-time update for box manipulation with one", "finger2_wrench_on_box finger2_forces += box_force_on_finger2 ###################################### # Numerically integrate ###################################### # Build the derivatives", "normal_up * jnp.array([0.0, 1.0]) normal += normal_down * jnp.array([0.0, -1.0]) # Get the", "box frame right_or_up = p_BF[1] > -p_BF[0] left_or_up = p_BF[1] > p_BF[0] normal_right", "box_forces += calc_box_ground_wrench( box_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) finger1_forces +=", "frame p_WF = finger_state[:2] v_WF = finger_state[2:] # Get penetration into ground phi_finger_ground", "vz, omega) state of the box finger_state_initial: initial (x, z, vx, vz) state", "jnp.abs(tangential_velocity) <= psi_s slipping_mask = jnp.logical_not(sticking_mask) * jnp.sign(tangential_velocity) mu = sticking_mask * c", "box_state_trace.at[i, :].set(next_box_state) finger_state_trace = finger_state_trace.at[i, :].set(next_finger_state) # Return the simulated values return box_state_trace,", "c * tangential_velocity + slipping_mask * mu_d tangent_force = -mu * normal_force contact_force", "Control forces on fingers finger1_pos_error = finger1_state_desired - finger1_state[:2] finger1_vel_error = -finger1_state[2:] finger1_forces", "box_state_trace, finger_state_trace @jax.jit def box_two_finger_step( box_state, finger1_state, finger1_state_desired, finger2_state, finger2_state_desired, finger_control_stiffness, ): \"\"\"Compute", "= finger_state_desired - finger_state[:2] finger_vel_error = -finger_state[2:] finger_forces = finger_forces + finger_control_stiffness *", "box_state: current (x, z, theta, vx, vz, omega) state of the box box_size:", "velocity of finger relative to contact pt in box frame v_contactF_B = v_BF", "def calc_box_finger_wrench( box_state, finger_state, box_size, mu_d, c, psi_s, contact_k, contact_d ): \"\"\"Compute the", "in # the simplified friction model from eq 21 and 22 in #", "the ground. 
args: finger_state: current (x, z, theta, vx, vz, omega) state of", "and points in the same direction as the relative velocity tangential_velocity = (", "eq 21 and 22 in # https://arxiv.org/pdf/2109.05143.pdf, but with damping. normal_velocity = v_WF[1]", "calc_box_ground_wrench( box_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) finger_forces += calc_finger_ground_force( finger_state,", "= box_state_trace[i - 1, :] current_finger1_state = finger1_state_trace[i - 1, :] current_finger1_state_desired =", "* r * jnp.array( [ [-1, -1], # top left [-1, 1], #", "normal += normal_up * jnp.array([0.0, 1.0]) normal += normal_down * jnp.array([0.0, -1.0]) #", "that intersect the ground at z = 0 half_size = box_size / 2.0", "box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg) finger1_state_dot = finger1_state_dot.at[2:].add(finger1_forces / finger_mass_kg) finger2_state_dot = finger2_state_dot.at[2:].add(finger2_forces / finger_mass_kg)", "over time finger2_state_initial: initial (x, z, vx, vz) state of the finger finger2_state_desired_trace:", "1]) # Find any that have negative z: min(0, signed distance) phi_corner_ground =", "# critical damping # General properties g = 9.81 dt = 0.001 #", "phi = signed distance. phi = jnp.minimum(0.0, jnp.maximum(x_dist, z_dist)) phi = phi +", "@jax.jit def box_two_finger_step( box_state, finger1_state, finger1_state_desired, finger2_state, finger2_state_desired, finger_control_stiffness, ): \"\"\"Compute a single", "p_BF = R_WB.T @ p_BF_W # Get velocity of the finger in box", "finger_state_desired_trace: N_steps x 2 array of desired (x_d, z_d) state of the finger", "returns: contact force in x and z \"\"\" # Get the position and", "# and points in the same direction as the relative velocity tangential_velocity =", "properties box_mass_kg = 1.0 box_side_m = 0.5 box_inertia = 1 / 6 *", "finger finger_state_desired: desired (x_d, z_d) state of the finger finger_control_stiffness: the parameter for", "state of the box finger_state: current (x, z, vx, vz) state of the", "r = jnp.sqrt(2) * half_size v_BC = ( box_state[5] * r * jnp.array(", "box_state_initial, finger_state_initial, finger_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate the evolution of the box-finger system", "v_Bcontact # Get the normal vector of the contact in the box frame", "): \"\"\"Simulate the evolution of the box-finger system with one finger, starting at", "z_dist)) phi = phi + jnp.linalg.norm( jnp.maximum(jnp.array([1e-3, 1e-3]), jnp.array([x_dist, z_dist])) ) return phi", "@jax.jit def calc_box_ground_wrench(box_state, box_size, mu_d, c, psi_s, contact_k, contact_d): \"\"\"Compute the contact wrench", "state of the finger finger1_state_desired_trace: N_steps x 2 array of desired (x_d, z_d)", "= jnp.zeros(2) finger2_forces = jnp.zeros(2) box_forces = jnp.zeros(3) # Gravitational force on each", "# Simulate for i in range(1, N_steps): # get currents state current_box_state =", "left_or_up) normal_down = jnp.logical_and( jnp.logical_not(right_or_up), jnp.logical_not(left_or_up) ) normal = normal_right * jnp.array([1.0, 0.0])", "z_d) state of the finger over time finger_control_stiffness: the parameter for the finger", "= jnp.logical_and(right_or_up, left_or_up) normal_left = jnp.logical_and(jnp.logical_not(right_or_up), left_or_up) normal_down = jnp.logical_and( jnp.logical_not(right_or_up), jnp.logical_not(left_or_up) )", "# vector! 
# Sum up the contact forces in the box frame contact_force_B", "tangent = tangential_velocity / (jnp.linalg.norm(tangential_velocity + 1e-3) + 1e-3) # Get signed distance", "z_d) state of the second finger finger_control_stiffness: the parameter for the finger stiffness", "and the ground. args: box_state: current (x, z, theta, vx, vz, omega) state", "current_finger_state_desired = finger_state_desired_trace[i - 1] # get next state next_box_state, next_finger_state = box_single_finger_step(", "each body finger1_forces = finger1_forces.at[1].add(-g * finger_mass_kg) finger2_forces = finger2_forces.at[1].add(-g * finger_mass_kg) box_forces", "finger_control_stiffness * finger1_pos_error finger1_forces = finger1_forces + finger_control_damping * finger1_vel_error finger2_pos_error = finger2_state_desired", "of desired (x_d, z_d) state of the finger over time finger2_state_initial: initial (x,", "this stackoverflow answer for the inspiration for this code: # stackoverflow.com/questions/30545052/calculate-signed-distance-between-point-and- # rectangle", "the box-finger system with one finger, starting at the given initial states and", "jnp.zeros((N_steps, 4)) finger2_state_trace = jnp.zeros((N_steps, 4)) # Store the initial conditions box_state_trace =", "slipping_mask = jnp.logical_not(sticking_mask) * jnp.sign(tangential_velocity) mu = sticking_mask * c * tangential_velocity +", "= 1 / 6 * box_mass_kg * box_side_m ** 2 # Finger properties", "and the ground. args: finger_state: current (x, z, theta, vx, vz, omega) state", "finger (x, z) into the box frame p_WF = finger_pose p_WB = box_pose[:2]", "the signed distance x_dist = jnp.maximum(-(p_BF[0] + box_size / 2.0), p_BF[0] - box_size", "initial (x, z, theta, vx, vz, omega) state of the box finger1_state_initial: initial", "desired (x_d, z_d) state of the finger over time finger_control_stiffness: the parameter for", "return box_wrench, finger_forces @jax.jit def box_single_finger_step( box_state, finger_state, finger_state_desired, finger_control_stiffness, ): \"\"\"Compute a", "box_side_m, mu_d, c, psi_s, contact_k, contact_d ) finger_forces += calc_finger_ground_force( finger_state, mu_d, c,", "step ###################################### # Get forces on each body ###################################### finger1_forces = jnp.zeros(2) finger2_forces", "/ c contact_k = 1000 contact_d = 2 * jnp.sqrt(box_mass_kg * contact_k) #", "the simulation ###################################### # Box properties box_mass_kg = 1.0 box_side_m = 0.5 box_inertia", "contact force to the box and finger box_wrench = jnp.zeros(3) box_wrench = box_wrench.at[:2].add(-contact_force_W)", "int specifying the number of discrete time steps to simulate returns: box_state_trace, finger_state_trace", "box_finger_signed_distance(box_pose, finger_pose, box_size): \"\"\"Compute the signed distance from the box to the finger", "finger_state_dot = jnp.zeros(4) # Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger_state_dot = finger_state_dot.at[:2].add(finger_state[2:]) # Forces", "between box and finger finger_wrench_on_box, box_force_on_finger = calc_box_finger_wrench( box_state, finger_state, box_side_m, mu_d, c,", "to this stackoverflow answer for the inspiration for this code: # stackoverflow.com/questions/30545052/calculate-signed-distance-between-point-and- #", "the evolution of the box-finger system with one finger, starting at the given", "= jnp.logical_and(jnp.logical_not(right_or_up), left_or_up) normal_down = jnp.logical_and( 
jnp.logical_not(right_or_up), jnp.logical_not(left_or_up) ) normal = normal_right *", "body finger1_forces = finger1_forces.at[1].add(-g * finger_mass_kg) finger2_forces = finger2_forces.at[1].add(-g * finger_mass_kg) box_forces =", "= box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg) finger_state_dot = finger_state_dot.at[2:].add(finger_forces / finger_mass_kg) # Torques box_state_dot =", "box frame # Transform into world frame R_WB = rotation_matrix(box_state[2]) p_BC_W = (R_WB", "finger_state_initial, finger_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate the evolution of the box-finger system with", "the contact wrench between the box and the ground. args: box_state: current (x,", "of the finger finger2_state_desired_trace: N_steps x 2 array of desired (x_d, z_d) state", "R_BW = R_WB.T p_BF = R_BW @ p_BF_W # Now get the signed", "mu_d, c, psi_s, contact_k, contact_d ) finger_forces += calc_finger_ground_force( finger_state, mu_d, c, psi_s,", "from ground. box_forces += calc_box_ground_wrench( box_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d )", "forces from the ground. box_forces += calc_box_ground_wrench( box_state, box_side_m, mu_d, c, psi_s, contact_k,", "begins contact_k: spring constant of contact contact_d: damping constant of contact returns: contact", ":].set(next_box_state) finger1_state_trace = finger1_state_trace.at[i, :].set(next_finger1_state) finger2_state_trace = finger2_state_trace.at[i, :].set(next_finger2_state) # Return the simulated", "float indicating the side length of the box mu_d: coefficient of friction between", "- v_contactF_B.dot(normal) * normal ) # relative velocity in tangent direction normal_velocity =", "# Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger_state_dot = finger_state_dot.at[:2].add(finger_state[2:]) # Forces box_state_dot = box_state_dot.at[3:5].add(box_forces[:2]", "jnp.zeros(3) # Gravitational force on each body finger_forces = finger_forces.at[1].add(-g * finger_mass_kg) box_forces", "box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg) finger1_state_dot = finger1_state_dot.at[2:].add(finger1_forces / finger_mass_kg) finger2_state_dot = finger2_state_dot.at[2:].add(finger2_forces", "contact_wrench_on_box @jax.jit def calc_box_finger_wrench( box_state, finger_state, box_size, mu_d, c, psi_s, contact_k, contact_d ):", "slipping_mask = jnp.logical_not(sticking_mask) mu = sticking_mask * c * tangential_velocity + slipping_mask *", "relative velocity in tangent direction normal_velocity = v_contactF_B.dot(normal) # scalar, along the normal", "= tangential_velocity / (jnp.linalg.norm(tangential_velocity + 1e-3) + 1e-3) # Get signed distance phi_finger_box", "get position in box frame R_WB = jnp.array( [[jnp.cos(theta_B), -jnp.sin(theta_B)], [jnp.sin(theta_B), jnp.cos(theta_B)]] )", "normal_force * normal + tangent_force # transform into the world frame contact_force_W =", "(R_WB @ v_BC.T).T + jnp.tile(box_state[3:5], [4, 1]) # Find any that have negative", "velocity of the finger in box frame v_WF = finger_state[2:] v_WB = box_state[3:5]", "mu_d, c, psi_s, contact_k, contact_d): \"\"\"Compute the contact force between a finger and", "0.0]) normal += normal_left * jnp.array([-1.0, 0.0]) normal += normal_up * jnp.array([0.0, 1.0])", "# scalar, along the normal vector tangent = tangential_velocity / (jnp.linalg.norm(tangential_velocity + 1e-3)", "contact contact_d: damping constant of contact returns: contact wrench in x, z, and", "frame contact_force_W = R_WB @ 
contact_force_B # Add the contact force to the", "world frame R_WB = rotation_matrix(box_state[2]) p_BC_W = (R_WB @ p_BC.T).T p_WC = p_BC_W", "the given initial states and applying the specified control inputs args: box_state_initial: initial", "= box_finger_signed_distance( box_state[:3], finger_state[:2], box_size ) # Clip to only consider negative values", "of discrete time steps to simulate returns: box_state_trace, finger_state_trace \"\"\" # Create arrays", "z) into the box frame p_WF = finger_pose p_WB = box_pose[:2] theta_B =", "Forces box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg) finger_state_dot = finger_state_dot.at[2:].add(finger_forces / finger_mass_kg) # Torques", "Save box_state_trace = box_state_trace.at[i, :].set(next_box_state) finger1_state_trace = finger1_state_trace.at[i, :].set(next_finger1_state) finger2_state_trace = finger2_state_trace.at[i, :].set(next_finger2_state)", "Sum up the contact forces in the box frame contact_force_B = normal_force *", "jnp.logical_and(right_or_up, jnp.logical_not(left_or_up)) normal_up = jnp.logical_and(right_or_up, left_or_up) normal_left = jnp.logical_and(jnp.logical_not(right_or_up), left_or_up) normal_down = jnp.logical_and(", "finger_state + dt * finger_state_dot return new_box_state, new_finger_state def box_single_finger_simulate( box_state_initial, finger_state_initial, finger_state_desired_trace,", "v_WF = finger_state[2:] # Get penetration into ground phi_finger_ground = jnp.minimum(jnp.zeros(1), p_WF[1]) #", "finger finger2_state_desired_trace: N_steps x 2 array of desired (x_d, z_d) state of the", "box_side_m ** 2 # Finger properties finger_mass_kg = 0.1 finger_control_damping = 2 #", "contact contact_d: damping constant of contact returns: contact force in x and z", "return new_box_state, new_finger_state def box_single_finger_simulate( box_state_initial, finger_state_initial, finger_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate the", "Save box_state_trace = box_state_trace.at[i, :].set(next_box_state) finger_state_trace = finger_state_trace.at[i, :].set(next_finger_state) # Return the simulated", "contact wrench between the box and the ground. args: box_state: current (x, z,", "pt in box frame v_contactF_B = v_BF - v_Bcontact # Get the normal", ") # Contact forces between box and finger finger_wrench_on_box, box_force_on_finger = calc_box_finger_wrench( box_state,", "* finger_mass_kg) box_forces = box_forces.at[1].add(-g * box_mass_kg) # Control forces on fingers finger1_pos_error", "contact_k, contact_d ) # Contact forces between box and finger finger_wrench_on_box, box_force_on_finger =", "current_finger_state = finger_state_trace[i - 1, :] current_finger_state_desired = finger_state_desired_trace[i - 1] # get", "traces box_state_trace = jnp.zeros((N_steps, 6)) finger1_state_trace = jnp.zeros((N_steps, 4)) finger2_state_trace = jnp.zeros((N_steps, 4))", "contact_wrench_on_box.at[2].add( jnp.cross(p_BC_W[i, :], contact_force) ) return contact_wrench_on_box @jax.jit def calc_box_finger_wrench( box_state, finger_state, box_size,", "vector! 
# Sum up the contact forces in the box frame contact_force_B =", "v_WF[1] normal_force = -contact_k * phi_finger_ground normal_force = normal_force - contact_d * normal_velocity", "# Get signed distance phi_finger_box = box_finger_signed_distance( box_state[:3], finger_state[:2], box_size ) # Clip", "vz, omega) state of the box finger1_state: current (x, z, vx, vz) state", "tangential_velocity / (jnp.linalg.norm(tangential_velocity + 1e-3) + 1e-3) # Get signed distance phi_finger_box =", "damping constant of contact returns: Tuple of - contact wrench on box in", "= 0.001 # seconds per step ###################################### # Get forces on each body", "the simplified friction model from eq 21 and 22 in # https://arxiv.org/pdf/2109.05143.pdf, but", "of the box finger1_state: current (x, z, vx, vz) state of the first", "- 1, :] current_finger_state = finger_state_trace[i - 1, :] current_finger_state_desired = finger_state_desired_trace[i -", "Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger_state_dot = finger_state_dot.at[:2].add(finger_state[2:]) # Forces box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] /", "@jax.jit def box_finger_signed_distance(box_pose, finger_pose, box_size): \"\"\"Compute the signed distance from the box to", "- finger_state[:2] finger_vel_error = -finger_state[2:] finger_forces = finger_forces + finger_control_stiffness * finger_pos_error finger_forces", "22 in # https://arxiv.org/pdf/2109.05143.pdf, but with damping. normal_velocity = v_WC[i, 1] normal_force =", "= jnp.zeros((N_steps, 4)) # Store the initial conditions box_state_trace = box_state_trace.at[0, :].set(box_state_initial) finger_state_trace", "box_size / 2.0 p_BC = jnp.array( [ [-half_size, half_size], # top left [half_size,", "( phi_corner_ground[i] < 0 ) tangential_velocity = v_WC[i, 0] sticking_mask = jnp.abs(tangential_velocity) <=", "a single discrete-time update for box manipulation with one finger, using the penalty", "Find any that have negative z: min(0, signed distance) phi_corner_ground = jnp.minimum(jnp.zeros(4), p_WC[:,", "https://arxiv.org/pdf/2109.05143.pdf, but with damping. normal_velocity = v_WC[i, 1] normal_force = -contact_k * phi_corner_ground[i]", "finger finger_pos_error = finger_state_desired - finger_state[:2] finger_vel_error = -finger_state[2:] finger_forces = finger_forces +", "box_state_dot = box_state_dot.at[5].add(box_forces[2] / box_inertia) # Itegrate new_box_state = box_state + dt *", "box_size: side length of box returns: float signed distance \"\"\" # Credit to", "of finger relative to contact pt in box frame v_contactF_B = v_BF -", "\"\"\"Compute the signed distance from the box to the finger args: box_pose: current", "frame right_or_up = p_BF[1] > -p_BF[0] left_or_up = p_BF[1] > p_BF[0] normal_right =", "the finger over time finger2_state_initial: initial (x, z, vx, vz) state of the", "force between a finger and the ground. args: finger_state: current (x, z, theta,", "1.0 box_side_m = 0.5 box_inertia = 1 / 6 * box_mass_kg * box_side_m", "mu_d = 0.7 c = 2.0 psi_s = mu_d / c contact_k =", "contact returns: contact wrench in x, z, and theta. 
\"\"\" # Start by", "- v_WB v_BF = R_WB.T @ v_BF_W # Get velocity of contact point", "to the box and finger box_wrench = jnp.zeros(3) box_wrench = box_wrench.at[:2].add(-contact_force_W) box_wrench =", "finger_state_trace[i - 1, :] current_finger_state_desired = finger_state_desired_trace[i - 1] # get next state", "box_state_trace.at[0, :].set(box_state_initial) finger_state_trace = finger_state_trace.at[0, :].set(finger_state_initial) # Simulate for i in range(1, N_steps):", "* box_mass_kg) # Control forces on finger finger_pos_error = finger_state_desired - finger_state[:2] finger_vel_error", "normal_velocity = v_WF[1] normal_force = -contact_k * phi_finger_ground normal_force = normal_force - contact_d", "1, :] current_finger1_state = finger1_state_trace[i - 1, :] current_finger1_state_desired = finger1_state_desired_trace[i - 1]", "= finger_state[:2] v_WF = finger_state[2:] # Get penetration into ground phi_finger_ground = jnp.minimum(jnp.zeros(1),", "the same direction as the relative velocity tangential_velocity = ( v_contactF_B - v_contactF_B.dot(normal)", "finger2_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate the evolution of the box-finger system with one", "the first finger finger2_state: current (x, z, vx, vz) state of the second", "[-half_size, -half_size], # bottom left [half_size, -half_size], # bottom right ] ) #", "box_state[3:5] v_BF_W = v_WF - v_WB v_BF = R_WB.T @ v_BF_W # Get", "] ) ) # corner point velocities in box frame # Transform to", "the finger point in the box frame p_WF = finger_state[:2] p_WB = box_state[:2]", "x, z, and theta. - contact force on finger in x and z.", "R_WB.T @ p_BF_W # Get velocity of the finger in box frame v_WF", "Get the tangent vector, which is orthogonal to the normal vector # and", "bottom left [half_size, -half_size], # bottom right ] ) # corner points in", "# transform into the world frame contact_force_W = R_WB @ contact_force_B # Add", "state current_box_state = box_state_trace[i - 1, :] current_finger_state = finger_state_trace[i - 1, :]", "frame # Transform into world frame R_WB = rotation_matrix(box_state[2]) p_BC_W = (R_WB @", "def box_finger_signed_distance(box_pose, finger_pose, box_size): \"\"\"Compute the signed distance from the box to the", "of the finger in the world frame p_WF = finger_state[:2] v_WF = finger_state[2:]", "# bottom left [1, 1], # bottom right ] ) ) # corner", "get next state next_box_state, next_finger_state = box_single_finger_step( current_box_state, current_finger_state, current_finger_state_desired, finger_control_stiffness, ) #", "of the second finger finger_control_stiffness: the parameter for the finger stiffness control returns:", "the finger in box frame v_WF = finger_state[2:] v_WB = box_state[3:5] v_BF_W =", "6)) finger_state_trace = jnp.zeros((N_steps, 4)) # Store the initial conditions box_state_trace = box_state_trace.at[0,", "each corner point r = jnp.sqrt(2) * half_size v_BC = ( box_state[5] *", "1, :] current_finger2_state_desired = finger2_state_desired_trace[i - 1] # get next state next_box_state, next_finger1_state,", "* finger_state_dot return new_box_state, new_finger_state def box_single_finger_simulate( box_state_initial, finger_state_initial, finger_state_desired_trace, finger_control_stiffness, N_steps, ):", "finger1_state: current (x, z, vx, vz) state of the first finger finger1_state_desired: desired", "about the z axis to get position in box frame R_WB = jnp.array(", "0.5 box_inertia = 1 / 6 * box_mass_kg * box_side_m ** 2 #", "= 
finger2_state_dot.at[2:].add(finger2_forces / finger_mass_kg) # Torques box_state_dot = box_state_dot.at[5].add(box_forces[2] / box_inertia) # Itegrate", "contact_d ) finger2_forces += calc_finger_ground_force( finger2_state, mu_d, c, psi_s, contact_k, contact_d ) #", "finger1_state_trace.at[0, :].set(finger1_state_initial) finger2_state_trace = finger2_state_trace.at[0, :].set(finger2_state_initial) # Simulate for i in range(1, N_steps):", "vx, vz, omega) state of the box mu_d: coefficient of friction between box", "the contact forces. Approximate ground force as a damped spring, as in #", "omega) state of the box finger_state: current (x, z, vx, vz) state of", "is orthogonal to the normal vector # and points in the same direction", "each body finger_forces = finger_forces.at[1].add(-g * finger_mass_kg) box_forces = box_forces.at[1].add(-g * box_mass_kg) #", "= box_size / 2.0 p_BC = jnp.array( [ [-half_size, half_size], # top left", "jnp.zeros(3) for i in range(4): # Get the friction force. Approximate ground force", "state of the box finger_state_initial: initial (x, z, vx, vz) state of the", "Gravitational force on each body finger1_forces = finger1_forces.at[1].add(-g * finger_mass_kg) finger2_forces = finger2_forces.at[1].add(-g", "in determining sticking friction psi_s: tangential velocity where slipping begins contact_k: spring constant", "= 0.7 c = 2.0 psi_s = mu_d / c contact_k = 1000", "of the first finger finger2_state: current (x, z, vx, vz) state of the", "in box frame v_Bcontact = box_state[5] * jnp.array([[0, -1], [1, 0]]) @ p_BF", "= -finger_state[2:] finger_forces = finger_forces + finger_control_stiffness * finger_pos_error finger_forces = finger_forces +", "1, :] current_finger1_state_desired = finger1_state_desired_trace[i - 1] current_finger2_state = finger2_state_trace[i - 1, :]", "box_size / 2.0) z_dist = jnp.maximum(-(p_BF[1] + box_size / 2.0), p_BF[1] - box_size", "/ 2.0), p_BF[1] - box_size / 2.0) # phi = signed distance. phi", "box_mass_kg = 1.0 box_side_m = 0.5 box_inertia = 1 / 6 * box_mass_kg", "return contact_wrench_on_box @jax.jit def calc_box_finger_wrench( box_state, finger_state, box_size, mu_d, c, psi_s, contact_k, contact_d", "v_BF_W = v_WF - v_WB v_BF = R_WB.T @ v_BF_W # Get velocity", "vz, omega) state of the box box_size: float indicating the side length of", "p_BF[1] - box_size / 2.0) # phi = signed distance. 
phi = jnp.minimum(0.0,", "finger box_size: float indicating the side length of the box mu_d: coefficient of", "2 # Finger properties finger_mass_kg = 0.1 finger_control_damping = 2 # Contact properties", "stiffness control returns: new_box_state, new_finger_state \"\"\" ###################################### # define parameters of the simulation", "# Gravitational force on each body finger1_forces = finger1_forces.at[1].add(-g * finger_mass_kg) finger2_forces =", "(x, z, theta, vx, vz, omega) state of the box finger1_state_initial: initial (x,", "scalar, along the normal vector tangent = tangential_velocity / (jnp.linalg.norm(tangential_velocity + 1e-3) +", "= p_BC_W + jnp.tile(box_state[:2], [4, 1]) # Also find the velocities of each", "jnp.zeros(6) finger_state_dot = jnp.zeros(4) # Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger_state_dot = finger_state_dot.at[:2].add(finger_state[2:]) #", "box_state + dt * box_state_dot new_finger_state = finger_state + dt * finger_state_dot return", "0] sticking_mask = jnp.abs(tangential_velocity) <= psi_s slipping_mask = jnp.logical_not(sticking_mask) * jnp.sign(tangential_velocity) mu =", "signed distance from the box to the finger args: box_pose: current (x, z,", "new_finger_state def box_single_finger_simulate( box_state_initial, finger_state_initial, finger_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate the evolution of", "# First transform the finger (x, z) into the box frame p_WF =", "box_two_finger_simulate( box_state_initial, finger1_state_initial, finger1_state_desired_trace, finger2_state_initial, finger2_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate the evolution of", "contact_d ) # Contact forces between box and finger finger_wrench_on_box, box_force_on_finger = calc_box_finger_wrench(", "\"\"\"Compute a single discrete-time update for box manipulation with one finger, using the", "# Contact forces between box and finger finger_wrench_on_box, box_force_on_finger = calc_box_finger_wrench( box_state, finger_state,", "/ 6 * box_mass_kg * box_side_m ** 2 # Finger properties finger_mass_kg =", "the second finger finger_control_stiffness: the parameter for the finger stiffness control returns: new_box_state,", "calc_finger_ground_force(finger_state, mu_d, c, psi_s, contact_k, contact_d): \"\"\"Compute the contact force between a finger", "current (x, z, theta, vx, vz, omega) state of the box mu_d: coefficient", "have negative z: min(0, signed distance) phi_corner_ground = jnp.minimum(jnp.zeros(4), p_WC[:, 1]) # For", "finger2_state_dot = jnp.zeros(4) # Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger1_state_dot = finger1_state_dot.at[:2].add(finger1_state[2:]) finger2_state_dot =", "the finger finger_state_desired: desired (x_d, z_d) state of the finger finger_control_stiffness: the parameter", "[ [-half_size, half_size], # top left [half_size, half_size], # top right [-half_size, -half_size],", "Also find the velocities of each corner point r = jnp.sqrt(2) * half_size", "jnp.cos(theta_B)]] ) R_BW = R_WB.T p_BF = R_BW @ p_BF_W # Now get", "+ slipping_mask * mu_d tangent_force = -mu * normal_force contact_force = jnp.array([tangent_force, normal_force]).reshape(2)", "(x, z) into the box frame p_WF = finger_pose p_WB = box_pose[:2] theta_B", "state of the box box_size: float indicating the side length of the box", "z, vx, vz) state of the second finger finger2_state_desired: desired (x_d, z_d) state", "finger_state_desired - finger_state[:2] 
finger_vel_error = -finger_state[2:] finger_forces = finger_forces + finger_control_stiffness * finger_pos_error", "finger_wrench_on_box finger_forces += box_force_on_finger ###################################### # Numerically integrate ###################################### # Build the derivatives", "point is just the finger point in the box frame p_WF = finger_state[:2]", "p_BF # Get velocity of finger relative to contact pt in box frame", "box_side_m, mu_d, c, psi_s, contact_k, contact_d ) box_forces += finger_wrench_on_box finger_forces += box_force_on_finger", "c, psi_s, contact_k, contact_d ) box_forces += finger1_wrench_on_box finger1_forces += box_force_on_finger1 finger2_wrench_on_box, box_force_on_finger2", "current_finger1_state_desired, current_finger2_state, current_finger2_state_desired, finger_control_stiffness, ) # Save box_state_trace = box_state_trace.at[i, :].set(next_box_state) finger1_state_trace =", "mu_d, c, psi_s, contact_k, contact_d ) finger2_forces += calc_finger_ground_force( finger2_state, mu_d, c, psi_s,", "jnp.array( [ [-half_size, half_size], # top left [half_size, half_size], # top right [-half_size,", "* finger1_pos_error finger1_forces = finger1_forces + finger_control_damping * finger1_vel_error finger2_pos_error = finger2_state_desired -", "psi_s = mu_d / c contact_k = 1000 contact_d = 2 * jnp.sqrt(box_mass_kg", "get the signed distance x_dist = jnp.maximum(-(p_BF[0] + box_size / 2.0), p_BF[0] -", "# Get the friction force. Approximate ground force as a damped spring, as", "spring constant of contact contact_d: damping constant of contact returns: contact force in", "control N_steps: int specifying the number of discrete time steps to simulate returns:", "# rectangle # First transform the finger (x, z) into the box frame", "= box_state[:2] p_BF_W = p_WF - p_WB R_WB = rotation_matrix(box_state[2]) p_BF = R_WB.T", "the box to the finger args: box_pose: current (x, z, theta) state of", "# Add the friction force to the box contact_wrench_on_box = contact_wrench_on_box.at[:2].add(contact_force) # Also", "on each body ###################################### finger_forces = jnp.zeros(2) box_forces = jnp.zeros(3) # Gravitational force", "Transform into world frame R_WB = rotation_matrix(box_state[2]) p_BC_W = (R_WB @ p_BC.T).T p_WC", "[-1, 1], # top right [1, -1], # bottom left [1, 1], #", "finger_state_trace @jax.jit def box_two_finger_step( box_state, finger1_state, finger1_state_desired, finger2_state, finger2_state_desired, finger_control_stiffness, ): \"\"\"Compute a", "= jnp.minimum(jnp.zeros(1), p_WF[1]) # Get the contact forces. 
Approximate ground force as a", "jnp.maximum(-(p_BF[1] + box_size / 2.0), p_BF[1] - box_size / 2.0) # phi =", "sticking friction psi_s: tangential velocity where slipping begins contact_k: spring constant of contact", "box_state_dot = jnp.zeros(6) finger_state_dot = jnp.zeros(4) # Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger_state_dot =", "\"\"\"Simulate the evolution of the box-finger system with one finger, starting at the", "finger_vel_error = -finger_state[2:] finger_forces = finger_forces + finger_control_stiffness * finger_pos_error finger_forces = finger_forces", "current (x, z, theta, vx, vz, omega) state of the box finger_state: current", "world frame p_WF = finger_state[:2] v_WF = finger_state[2:] # Get penetration into ground", "up the contact forces in the box frame contact_force_B = normal_force * normal", "@jax.jit def rotation_matrix(theta): \"\"\"Return the 2D rotation matrix for angle theta\"\"\" return jnp.array(", "normal_velocity * (phi_finger_box < 0) sticking_mask = jnp.linalg.norm(tangential_velocity + 1e-3) <= psi_s slipping_mask", "into ground phi_finger_ground = jnp.minimum(jnp.zeros(1), p_WF[1]) # Get the contact forces. Approximate ground", "# Store the initial conditions box_state_trace = box_state_trace.at[0, :].set(box_state_initial) finger_state_trace = finger_state_trace.at[0, :].set(finger_state_initial)", "box finger1_state: current (x, z, vx, vz) state of the first finger finger1_state_desired:", "forces on fingers finger1_pos_error = finger1_state_desired - finger1_state[:2] finger1_vel_error = -finger1_state[2:] finger1_forces =", "matrix box_state_dot = jnp.zeros(6) finger1_state_dot = jnp.zeros(4) finger2_state_dot = jnp.zeros(4) # Velocities box_state_dot", "eq 21 and 22 in # https://arxiv.org/pdf/2109.05143.pdf, but with damping. normal_velocity = v_WC[i,", "c, psi_s, contact_k, contact_d ) finger_forces += calc_finger_ground_force( finger_state, mu_d, c, psi_s, contact_k,", "vx, vz, omega) state of the box finger_state_initial: initial (x, z, vx, vz)", "transform into the world frame contact_force_W = R_WB @ contact_force_B # Add the", "= -mu * normal_force # vector! # Sum up the contact forces in", "distance \"\"\" # Credit to this stackoverflow answer for the inspiration for this", "Start by finding any box corner points that intersect the ground at z", "second finger finger2_state_desired: desired (x_d, z_d) state of the second finger finger_control_stiffness: the", "i in range(1, N_steps): # get currents state current_box_state = box_state_trace[i - 1,", "p_BF[1] > p_BF[0] normal_right = jnp.logical_and(right_or_up, jnp.logical_not(left_or_up)) normal_up = jnp.logical_and(right_or_up, left_or_up) normal_left =", "# Control forces on fingers finger1_pos_error = finger1_state_desired - finger1_state[:2] finger1_vel_error = -finger1_state[2:]", "current_box_state, current_finger1_state, current_finger1_state_desired, current_finger2_state, current_finger2_state_desired, finger_control_stiffness, ) # Save box_state_trace = box_state_trace.at[i, :].set(next_box_state)", "sticking_mask * c * tangential_velocity + slipping_mask * mu_d tangent_force = -mu *", "= ( v_contactF_B - v_contactF_B.dot(normal) * normal ) # relative velocity in tangent", "contact_d ) box_forces += finger2_wrench_on_box finger2_forces += box_force_on_finger2 ###################################### # Numerically integrate ######################################", "theta. - contact force on finger in x and z. 
\"\"\" # Contact", "box and finger box_wrench = jnp.zeros(3) box_wrench = box_wrench.at[:2].add(-contact_force_W) box_wrench = box_wrench.at[2].add(jnp.cross(p_BF_W, -contact_force_W))", ":] current_finger_state_desired = finger_state_desired_trace[i - 1] # get next state next_box_state, next_finger_state =", "the initial conditions box_state_trace = box_state_trace.at[0, :].set(box_state_initial) finger_state_trace = finger_state_trace.at[0, :].set(finger_state_initial) # Simulate", "in # https://arxiv.org/pdf/2109.05143.pdf, but with damping. normal_velocity = v_WC[i, 1] normal_force = -contact_k", "for ground contact normal_force = -contact_k * phi_finger_box # scalar, in normal direction", "finger_forces + finger_control_stiffness * finger_pos_error finger_forces = finger_forces + finger_control_damping * finger_vel_error #", "v_BC = ( box_state[5] * r * jnp.array( [ [-1, -1], # top", "(x, z) state of the finger box_size: side length of box returns: float", "= (R_WB @ v_BC.T).T + jnp.tile(box_state[3:5], [4, 1]) # Find any that have", "###################################### # Build the derivatives matrix box_state_dot = jnp.zeros(6) finger_state_dot = jnp.zeros(4) #", "state of the finger finger_control_stiffness: the parameter for the finger stiffness control returns:", "in the world frame p_WF = finger_state[:2] v_WF = finger_state[2:] # Get penetration", "simulated values return box_state_trace, finger_state_trace @jax.jit def box_two_finger_step( box_state, finger1_state, finger1_state_desired, finger2_state, finger2_state_desired,", "length of the box mu_d: coefficient of friction between box and ground while", "torque from this interaction contact_wrench_on_box = contact_wrench_on_box.at[2].add( jnp.cross(p_BC_W[i, :], contact_force) ) return contact_wrench_on_box", "steps to simulate returns: box_state_trace, finger_state_trace \"\"\" # Create arrays to store simulation", "vector, which is orthogonal to the normal vector # and points in the", "vz) state of the finger box_size: float indicating the side length of the", "with the ground contact_wrench_on_box = jnp.zeros(3) for i in range(4): # Get the", "current (x, z, vx, vz) state of the first finger finger1_state_desired: desired (x_d,", "box finger_pose: current (x, z) state of the finger box_size: side length of", "the inspiration for this code: # stackoverflow.com/questions/30545052/calculate-signed-distance-between-point-and- # rectangle # First transform the", "finger_state[:2], box_size ) # Clip to only consider negative values phi_finger_box = jnp.minimum(0,", "(x, z, theta, vx, vz, omega) state of the box box_size: float indicating", "sticking_mask * c * tangential_velocity + slipping_mask * mu_d * tangent tangent_force =", "= 1000 contact_d = 2 * jnp.sqrt(box_mass_kg * contact_k) # critical damping #", "Return the simulated values return box_state_trace, finger_state_trace @jax.jit def box_two_finger_step( box_state, finger1_state, finger1_state_desired,", "mu_d, c, psi_s, contact_k, contact_d ) box_forces += finger_wrench_on_box finger_forces += box_force_on_finger ######################################", "= finger1_state_trace.at[i, :].set(next_finger1_state) finger2_state_trace = finger2_state_trace.at[i, :].set(next_finger2_state) # Return the simulated values return", "(x, z, vx, vz) state of the second finger finger2_state_desired: desired (x_d, z_d)", "https://arxiv.org/pdf/2109.05143.pdf, but with damping. 
normal_velocity = v_WF[1] normal_force = -contact_k * phi_finger_ground normal_force", "# define parameters of the simulation ###################################### # Box properties box_mass_kg = 1.0", "and z. \"\"\" # Contact point is just the finger point in the", "> p_BF[0] normal_right = jnp.logical_and(right_or_up, jnp.logical_not(left_or_up)) normal_up = jnp.logical_and(right_or_up, left_or_up) normal_left = jnp.logical_and(jnp.logical_not(right_or_up),", "vector tangent = tangential_velocity / (jnp.linalg.norm(tangential_velocity + 1e-3) + 1e-3) # Get signed", "box_forces = box_forces.at[1].add(-g * box_mass_kg) # Control forces on fingers finger1_pos_error = finger1_state_desired", "= finger1_state_trace.at[0, :].set(finger1_state_initial) finger2_state_trace = finger2_state_trace.at[0, :].set(finger2_state_initial) # Simulate for i in range(1,", "the number of discrete time steps to simulate returns: box_state_trace, finger_state_trace \"\"\" #", "+ finger_control_damping * finger1_vel_error finger2_pos_error = finger2_state_desired - finger2_state[:2] finger2_vel_error = -finger2_state[2:] finger2_forces", "contact_force_B # Add the contact force to the box and finger box_wrench =", "box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg) finger_state_dot = finger_state_dot.at[2:].add(finger_forces / finger_mass_kg) # Torques box_state_dot = box_state_dot.at[5].add(box_forces[2]", "finger1_state_desired: desired (x_d, z_d) state of the first finger finger2_state: current (x, z,", "v_BF - v_Bcontact # Get the normal vector of the contact in the", "normal_force contact_force = jnp.array([tangent_force, normal_force]).reshape(2) return contact_force @jax.jit def calc_box_ground_wrench(box_state, box_size, mu_d, c,", "between box and fingers finger1_wrench_on_box, box_force_on_finger1 = calc_box_finger_wrench( box_state, finger1_state, box_side_m, mu_d, c,", "world frame v_WC = (R_WB @ v_BC.T).T + jnp.tile(box_state[3:5], [4, 1]) # Find", "current (x, z, vx, vz) state of the second finger finger2_state_desired: desired (x_d,", "# Itegrate new_box_state = box_state + dt * box_state_dot new_finger_state = finger_state +", "jnp.zeros(4) # Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger1_state_dot = finger1_state_dot.at[:2].add(finger1_state[2:]) finger2_state_dot = finger2_state_dot.at[:2].add(finger2_state[2:]) #", "* jnp.array([1.0, 0.0]) normal += normal_left * jnp.array([-1.0, 0.0]) normal += normal_up *", "contact_k = 1000 contact_d = 2 * jnp.sqrt(box_mass_kg * contact_k) # critical damping", "next_finger2_state = box_two_finger_step( current_box_state, current_finger1_state, current_finger1_state_desired, current_finger2_state, current_finger2_state_desired, finger_control_stiffness, ) # Save box_state_trace", "= finger1_forces + finger_control_stiffness * finger1_pos_error finger1_forces = finger1_forces + finger_control_damping * finger1_vel_error", "z, vx, vz) state of the first finger finger1_state_desired: desired (x_d, z_d) state", "= R_WB @ contact_force_B # Add the contact force to the box and", "# Get forces on each body ###################################### finger1_forces = jnp.zeros(2) finger2_forces = jnp.zeros(2)", "contact_d ) finger1_forces += calc_finger_ground_force( finger1_state, mu_d, c, psi_s, contact_k, contact_d ) finger2_forces", ":].set(next_box_state) finger_state_trace = finger_state_trace.at[i, :].set(next_finger_state) # Return the simulated values return box_state_trace, finger_state_trace", "finger1_state_dot = 
finger1_state_dot.at[:2].add(finger1_state[2:]) finger2_state_dot = finger2_state_dot.at[:2].add(finger2_state[2:]) # Forces box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg)", "half_size = box_size / 2.0 p_BC = jnp.array( [ [-half_size, half_size], # top", "finger_state: current (x, z, vx, vz) state of the finger box_size: float indicating", "* normal ) # relative velocity in tangent direction normal_velocity = v_contactF_B.dot(normal) #", "@ p_BC.T).T p_WC = p_BC_W + jnp.tile(box_state[:2], [4, 1]) # Also find the", "= normal_force - contact_d * normal_velocity * (phi_finger_box < 0) sticking_mask = jnp.linalg.norm(tangential_velocity", "= ( box_state[5] * r * jnp.array( [ [-1, -1], # top left", "the box finger1_state: current (x, z, vx, vz) state of the first finger", "of contact contact_d: damping constant of contact returns: Tuple of - contact wrench", "+ slipping_mask * mu_d * tangent tangent_force = -mu * normal_force # vector!", "the ground. args: box_state: current (x, z, theta, vx, vz, omega) state of", "@ p_BF_W # Now get the signed distance x_dist = jnp.maximum(-(p_BF[0] + box_size", "normal_force = -contact_k * phi_finger_box # scalar, in normal direction normal_force = normal_force", "\"\"\" ###################################### # define parameters of the simulation ###################################### # Box properties box_mass_kg", "[4, 1]) # Find any that have negative z: min(0, signed distance) phi_corner_ground", "forces on each body ###################################### finger_forces = jnp.zeros(2) box_forces = jnp.zeros(3) # Gravitational", "# get next state next_box_state, next_finger_state = box_single_finger_step( current_box_state, current_finger_state, current_finger_state_desired, finger_control_stiffness, )", "= finger_state_desired_trace[i - 1] # get next state next_box_state, next_finger_state = box_single_finger_step( current_box_state,", "+ dt * finger_state_dot return new_box_state, new_finger_state def box_single_finger_simulate( box_state_initial, finger_state_initial, finger_state_desired_trace, finger_control_stiffness,", "- box_size / 2.0) z_dist = jnp.maximum(-(p_BF[1] + box_size / 2.0), p_BF[1] -", "ground. 
args: finger_state: current (x, z, theta, vx, vz, omega) state of the", "the box finger_state: current (x, z, vx, vz) state of the finger box_size:", "half_size], # top right [-half_size, -half_size], # bottom left [half_size, -half_size], # bottom", "finger1_state_initial, finger1_state_desired_trace, finger2_state_initial, finger2_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate the evolution of the box-finger", "finger_state[2:] v_WB = box_state[3:5] v_BF_W = v_WF - v_WB v_BF = R_WB.T @", "# get currents state current_box_state = box_state_trace[i - 1, :] current_finger1_state = finger1_state_trace[i", "properties mu_d = 0.7 c = 2.0 psi_s = mu_d / c contact_k", "phi_corner_ground[i] normal_force = normal_force - contact_d * normal_velocity * ( phi_corner_ground[i] < 0", "distance x_dist = jnp.maximum(-(p_BF[0] + box_size / 2.0), p_BF[0] - box_size / 2.0)", "contact_force_B = normal_force * normal + tangent_force # transform into the world frame", "= -mu * normal_force contact_force = jnp.array([tangent_force, normal_force]) # Add the friction force", "point r = jnp.sqrt(2) * half_size v_BC = ( box_state[5] * r *", "calc_box_finger_wrench( box_state, finger1_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) box_forces += finger1_wrench_on_box", "box_finger_signed_distance( box_state[:3], finger_state[:2], box_size ) # Clip to only consider negative values phi_finger_box", "force in x and z \"\"\" # Get the position and velocity of", "in range(1, N_steps): # get currents state current_box_state = box_state_trace[i - 1, :]", "finger_forces @jax.jit def box_single_finger_step( box_state, finger_state, finger_state_desired, finger_control_stiffness, ): \"\"\"Compute a single discrete-time", "Contact properties mu_d = 0.7 c = 2.0 psi_s = mu_d / c", "2 array of desired (x_d, z_d) state of the finger over time finger_control_stiffness:", "# Torques box_state_dot = box_state_dot.at[5].add(box_forces[2] / box_inertia) # Itegrate new_box_state = box_state +", "the contact forces in the box frame contact_force_B = normal_force * normal +", "box corner points that intersect the ground at z = 0 half_size =", "finger_control_damping * finger1_vel_error finger2_pos_error = finger2_state_desired - finger2_state[:2] finger2_vel_error = -finger2_state[2:] finger2_forces =", "derivatives matrix box_state_dot = jnp.zeros(6) finger1_state_dot = jnp.zeros(4) finger2_state_dot = jnp.zeros(4) # Velocities", "finger_forces += box_force_on_finger ###################################### # Numerically integrate ###################################### # Build the derivatives matrix", "finger2_state_dot = finger2_state_dot.at[:2].add(finger2_state[2:]) # Forces box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg) finger1_state_dot = finger1_state_dot.at[2:].add(finger1_forces", "# Clip to only consider negative values phi_finger_box = jnp.minimum(0, phi_finger_box) # Use", "parameter for the finger stiffness control N_steps: int specifying the number of discrete", "Tuple of - contact wrench on box in x, z, and theta. 
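

# A quick illustrative check (hypothetical helper, not from the original
# module): with the finger slightly below the ground and sliding in +x, the
# model above should return a leftward friction force and an upward normal
# force. Parameter values mirror those hard-coded in the step functions below.
def _demo_finger_ground_force():
    finger_state = jnp.array([0.0, -0.01, 0.5, 0.0])  # penetrating, sliding +x
    mu_d, c = 0.7, 2.0
    psi_s = mu_d / c  # slipping threshold
    contact_k = 1000
    contact_d = 2 * jnp.sqrt(1.0 * contact_k)  # critical damping for a 1 kg box
    return calc_finger_ground_force(finger_state, mu_d, c, psi_s, contact_k, contact_d)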
-", "( v_contactF_B - v_contactF_B.dot(normal) * normal ) # relative velocity in tangent direction", "next_box_state, next_finger1_state, next_finger2_state = box_two_finger_step( current_box_state, current_finger1_state, current_finger1_state_desired, current_finger2_state, current_finger2_state_desired, finger_control_stiffness, ) #", "specifying the number of discrete time steps to simulate returns: box_state_trace, finger_state_trace \"\"\"", "force to the box contact_wrench_on_box = contact_wrench_on_box.at[:2].add(contact_force) # Also add the torque from", "of the box finger_state_initial: initial (x, z, vx, vz) state of the finger", "for the inspiration for this code: # stackoverflow.com/questions/30545052/calculate-signed-distance-between-point-and- # rectangle # First transform", "of contact point in box frame v_Bcontact = box_state[5] * jnp.array([[0, -1], [1,", "state of the first finger finger1_state_desired: desired (x_d, z_d) state of the first", "= finger_state + dt * finger_state_dot return new_box_state, new_finger_state def box_single_finger_simulate( box_state_initial, finger_state_initial,", "JAX\"\"\" import jax.numpy as jnp import jax @jax.jit def box_finger_signed_distance(box_pose, finger_pose, box_size): \"\"\"Compute", "vz, omega) state of the box mu_d: coefficient of friction between box and", "on finger in x and z. \"\"\" # Contact point is just the", "corner points that intersect the ground at z = 0 half_size = box_size", "9.81 dt = 0.001 # seconds per step ###################################### # Get forces on", "\"\"\"Automatically-differentiable manipulation simulation engine using JAX\"\"\" import jax.numpy as jnp import jax @jax.jit", "+ finger_control_stiffness * finger2_pos_error finger2_forces = finger2_forces + finger_control_damping * finger2_vel_error # Contact", "determining sticking friction psi_s: tangential velocity where slipping begins contact_k: spring constant of", "= box_pose[2] p_BF_W = p_WF - p_WB # Rotate p_BF_W by -theta about", "finger2_state, mu_d, c, psi_s, contact_k, contact_d ) # Contact forces between box and", "to store simulation traces box_state_trace = jnp.zeros((N_steps, 6)) finger_state_trace = jnp.zeros((N_steps, 4)) #", "Get velocity of finger relative to contact pt in box frame v_contactF_B =", "-mu * normal_force contact_force = jnp.array([tangent_force, normal_force]).reshape(2) return contact_force @jax.jit def calc_box_ground_wrench(box_state, box_size,", "Get velocity of the finger in box frame v_WF = finger_state[2:] v_WB =", "direction normal_velocity = v_contactF_B.dot(normal) # scalar, along the normal vector tangent = tangential_velocity", "finger_control_stiffness: the parameter for the finger stiffness control N_steps: int specifying the number", "rotation_matrix(theta): \"\"\"Return the 2D rotation matrix for angle theta\"\"\" return jnp.array( [[jnp.cos(theta), -jnp.sin(theta)],", "contact forces. 
Approximate ground force as a damped spring, as in # the", "= finger1_state_trace[i - 1, :] current_finger1_state_desired = finger1_state_desired_trace[i - 1] current_finger2_state = finger2_state_trace[i", "and velocity of the finger in the world frame p_WF = finger_state[:2] v_WF", "on finger finger_pos_error = finger_state_desired - finger_state[:2] finger_vel_error = -finger_state[2:] finger_forces = finger_forces", "###################################### finger_forces = jnp.zeros(2) box_forces = jnp.zeros(3) # Gravitational force on each body", "finger1_state_initial: initial (x, z, vx, vz) state of the finger finger1_state_desired_trace: N_steps x", "omega) state of the box finger1_state: current (x, z, vx, vz) state of", "finger_control_stiffness, N_steps, ): \"\"\"Simulate the evolution of the box-finger system with one finger,", "Control forces on finger finger_pos_error = finger_state_desired - finger_state[:2] finger_vel_error = -finger_state[2:] finger_forces", "1000 contact_d = 2 * jnp.sqrt(box_mass_kg * contact_k) # critical damping # General", "the box frame contact_force_B = normal_force * normal + tangent_force # transform into", "= finger_state_dot.at[2:].add(finger_forces / finger_mass_kg) # Torques box_state_dot = box_state_dot.at[5].add(box_forces[2] / box_inertia) # Itegrate", "of contact returns: contact force in x and z \"\"\" # Get the", "state of the second finger finger_control_stiffness: the parameter for the finger stiffness control", "finger1_state_desired_trace: N_steps x 2 array of desired (x_d, z_d) state of the finger", "= box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg) finger1_state_dot = finger1_state_dot.at[2:].add(finger1_forces / finger_mass_kg) finger2_state_dot = finger2_state_dot.at[2:].add(finger2_forces /", "z, theta, vx, vz, omega) state of the box finger1_state_initial: initial (x, z,", "= normal_right * jnp.array([1.0, 0.0]) normal += normal_left * jnp.array([-1.0, 0.0]) normal +=", "the finger box_size: float indicating the side length of the box mu_d: coefficient", "calc_box_finger_wrench( box_state, finger2_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) box_forces += finger2_wrench_on_box", "finger_state, finger_state_desired, finger_control_stiffness, ): \"\"\"Compute a single discrete-time update for box manipulation with", "2 array of desired (x_d, z_d) state of the finger over time finger2_state_initial:", "tangential_velocity + slipping_mask * mu_d * tangent tangent_force = -mu * normal_force #", "finger_state_trace \"\"\" # Create arrays to store simulation traces box_state_trace = jnp.zeros((N_steps, 6))", ") box_forces += finger_wrench_on_box finger_forces += box_force_on_finger ###################################### # Numerically integrate ###################################### #", "finger2_state_desired_trace[i - 1] # get next state next_box_state, next_finger1_state, next_finger2_state = box_two_finger_step( current_box_state,", "signed distance \"\"\" # Credit to this stackoverflow answer for the inspiration for", "get currents state current_box_state = box_state_trace[i - 1, :] current_finger_state = finger_state_trace[i -", "Now get the signed distance x_dist = jnp.maximum(-(p_BF[0] + box_size / 2.0), p_BF[0]", "the same simplified friction model as used for ground contact normal_force = -contact_k", "-half_size], # bottom left [half_size, -half_size], # bottom right ] ) # corner", "min(0, signed distance) phi_corner_ground = jnp.minimum(jnp.zeros(4), p_WC[:, 1]) # For each corner, sum", "= 
contact_wrench_on_box.at[2].add( jnp.cross(p_BC_W[i, :], contact_force) ) return contact_wrench_on_box @jax.jit def calc_box_finger_wrench( box_state, finger_state,", "in box frame # Transform into world frame R_WB = rotation_matrix(box_state[2]) p_BC_W =", "g = 9.81 dt = 0.001 # seconds per step ###################################### # Get", "simulation ###################################### # Box properties box_mass_kg = 1.0 box_side_m = 0.5 box_inertia =", "= v_BF - v_Bcontact # Get the normal vector of the contact in", "finger_state[2:] # Get penetration into ground phi_finger_ground = jnp.minimum(jnp.zeros(1), p_WF[1]) # Get the", "damping constant of contact returns: contact force in x and z \"\"\" #", "forces from ground. box_forces += calc_box_ground_wrench( box_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d", "finger1_state_trace = jnp.zeros((N_steps, 4)) finger2_state_trace = jnp.zeros((N_steps, 4)) # Store the initial conditions", "box_state_trace, finger_state_trace \"\"\" # Create arrays to store simulation traces box_state_trace = jnp.zeros((N_steps,", "of the box finger_pose: current (x, z) state of the finger box_size: side", "box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg) finger_state_dot = finger_state_dot.at[2:].add(finger_forces / finger_mass_kg) # Torques box_state_dot", "= rotation_matrix(box_state[2]) p_BF = R_WB.T @ p_BF_W # Get velocity of the finger", "= 0.1 finger_control_damping = 2 # Contact properties mu_d = 0.7 c =", "= box_wrench.at[2].add(jnp.cross(p_BF_W, -contact_force_W)) finger_forces = contact_force_W return box_wrench, finger_forces @jax.jit def box_single_finger_step( box_state,", "c contact_k = 1000 contact_d = 2 * jnp.sqrt(box_mass_kg * contact_k) # critical", "in the box frame right_or_up = p_BF[1] > -p_BF[0] left_or_up = p_BF[1] >", "= box_state_dot.at[5].add(box_forces[2] / box_inertia) # Itegrate new_box_state = box_state + dt * box_state_dot", "tangential_velocity = v_WF[0] sticking_mask = jnp.abs(tangential_velocity) <= psi_s slipping_mask = jnp.logical_not(sticking_mask) * jnp.sign(tangential_velocity)", "sticking_mask = jnp.linalg.norm(tangential_velocity + 1e-3) <= psi_s slipping_mask = jnp.logical_not(sticking_mask) mu = sticking_mask", "finger1_wrench_on_box finger1_forces += box_force_on_finger1 finger2_wrench_on_box, box_force_on_finger2 = calc_box_finger_wrench( box_state, finger2_state, box_side_m, mu_d, c,", "which is orthogonal to the normal vector # and points in the same", "contact_d * normal_velocity * (phi_finger_ground < 0) tangential_velocity = v_WF[0] sticking_mask = jnp.abs(tangential_velocity)", "derivatives matrix box_state_dot = jnp.zeros(6) finger_state_dot = jnp.zeros(4) # Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:])", "= -contact_k * phi_finger_box # scalar, in normal direction normal_force = normal_force -", "# For each corner, sum up the forces and torques due to penetration", "finger finger2_state_desired: desired (x_d, z_d) state of the second finger finger_control_stiffness: the parameter", "= box_forces.at[1].add(-g * box_mass_kg) # Control forces on fingers finger1_pos_error = finger1_state_desired -", "@ v_BF_W # Get velocity of contact point in box frame v_Bcontact =", "args: box_state: current (x, z, theta, vx, vz, omega) state of the box", "/ box_inertia) # Itegrate new_box_state = box_state + dt * box_state_dot new_finger_state =", "right [-half_size, -half_size], # bottom left [half_size, -half_size], # bottom right ] )", "* box_mass_kg) # Control 
forces on fingers finger1_pos_error = finger1_state_desired - finger1_state[:2] finger1_vel_error", "finger_forces = finger_forces.at[1].add(-g * finger_mass_kg) box_forces = box_forces.at[1].add(-g * box_mass_kg) # Control forces", "contact_force = jnp.array([tangent_force, normal_force]) # Add the friction force to the box contact_wrench_on_box", "finger1_state_desired_trace, finger2_state_initial, finger2_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate the evolution of the box-finger system", "dt * finger2_state_dot return new_box_state, new_finger1_state, new_finger2_state def box_two_finger_simulate( box_state_initial, finger1_state_initial, finger1_state_desired_trace, finger2_state_initial,", "+ dt * box_state_dot new_finger1_state = finger1_state + dt * finger1_state_dot new_finger2_state =", "distance) phi_corner_ground = jnp.minimum(jnp.zeros(4), p_WC[:, 1]) # For each corner, sum up the", "finger2_state_desired_trace: N_steps x 2 array of desired (x_d, z_d) state of the finger", "normal_force]).reshape(2) return contact_force @jax.jit def calc_box_ground_wrench(box_state, box_size, mu_d, c, psi_s, contact_k, contact_d): \"\"\"Compute", "v_BF_W # Get velocity of contact point in box frame v_Bcontact = box_state[5]", "Box properties box_mass_kg = 1.0 box_side_m = 0.5 box_inertia = 1 / 6", "with one finger, using the penalty method for contact modelling with a simplified", "in range(4): # Get the friction force. Approximate ground force as a damped", "negative z: min(0, signed distance) phi_corner_ground = jnp.minimum(jnp.zeros(4), p_WC[:, 1]) # For each", ") # relative velocity in tangent direction normal_velocity = v_contactF_B.dot(normal) # scalar, along", "the finger stiffness control returns: new_box_state, new_finger_state \"\"\" ###################################### # define parameters of", "vz) state of the finger finger1_state_desired_trace: N_steps x 2 array of desired (x_d,", "of contact returns: contact wrench in x, z, and theta. 
\"\"\" # Start", "that have negative z: min(0, signed distance) phi_corner_ground = jnp.minimum(jnp.zeros(4), p_WC[:, 1]) #", "new_finger1_state = finger1_state + dt * finger1_state_dot new_finger2_state = finger2_state + dt *", "(phi_finger_box < 0) sticking_mask = jnp.linalg.norm(tangential_velocity + 1e-3) <= psi_s slipping_mask = jnp.logical_not(sticking_mask)", "\"\"\" # Create arrays to store simulation traces box_state_trace = jnp.zeros((N_steps, 6)) finger_state_trace", "frame v_WF = finger_state[2:] v_WB = box_state[3:5] v_BF_W = v_WF - v_WB v_BF", "# Get velocity of finger relative to contact pt in box frame v_contactF_B", "* finger2_state_dot return new_box_state, new_finger1_state, new_finger2_state def box_two_finger_simulate( box_state_initial, finger1_state_initial, finger1_state_desired_trace, finger2_state_initial, finger2_state_desired_trace,", "state of the finger finger2_state_desired_trace: N_steps x 2 array of desired (x_d, z_d)", "= finger1_state_dot.at[:2].add(finger1_state[2:]) finger2_state_dot = finger2_state_dot.at[:2].add(finger2_state[2:]) # Forces box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg) finger1_state_dot", "define parameters of the simulation ###################################### # Box properties box_mass_kg = 1.0 box_side_m", "corner points in box frame # Transform into world frame R_WB = rotation_matrix(box_state[2])", "p_BC_W + jnp.tile(box_state[:2], [4, 1]) # Also find the velocities of each corner", "finger_control_stiffness * finger_pos_error finger_forces = finger_forces + finger_control_damping * finger_vel_error # Contact forces", "# Contact forces from ground. box_forces += calc_box_ground_wrench( box_state, box_side_m, mu_d, c, psi_s,", "finger_mass_kg) box_forces = box_forces.at[1].add(-g * box_mass_kg) # Control forces on fingers finger1_pos_error =", "manipulation with one finger, using the penalty method for contact modelling with a", "finger_state_desired_trace[i - 1] # get next state next_box_state, next_finger_state = box_single_finger_step( current_box_state, current_finger_state,", "x and z \"\"\" # Get the position and velocity of the finger", ") # Save box_state_trace = box_state_trace.at[i, :].set(next_box_state) finger1_state_trace = finger1_state_trace.at[i, :].set(next_finger1_state) finger2_state_trace =", "+ 1e-3) + 1e-3) # Get signed distance phi_finger_box = box_finger_signed_distance( box_state[:3], finger_state[:2],", "phi @jax.jit def rotation_matrix(theta): \"\"\"Return the 2D rotation matrix for angle theta\"\"\" return", "box and fingers finger1_wrench_on_box, box_force_on_finger1 = calc_box_finger_wrench( box_state, finger1_state, box_side_m, mu_d, c, psi_s,", "2.0) z_dist = jnp.maximum(-(p_BF[1] + box_size / 2.0), p_BF[1] - box_size / 2.0)", "[jnp.sin(theta), jnp.cos(theta)]] ) @jax.jit def calc_finger_ground_force(finger_state, mu_d, c, psi_s, contact_k, contact_d): \"\"\"Compute the", "discrete-time update for box manipulation with one finger, using the penalty method for", "per step ###################################### # Get forces on each body ###################################### finger_forces = jnp.zeros(2)", "+= box_force_on_finger1 finger2_wrench_on_box, box_force_on_finger2 = calc_box_finger_wrench( box_state, finger2_state, box_side_m, mu_d, c, psi_s, contact_k,", "* finger_mass_kg) finger2_forces = finger2_forces.at[1].add(-g * finger_mass_kg) box_forces = box_forces.at[1].add(-g * box_mass_kg) #", "# Get the tangent vector, which is orthogonal to the normal vector #", 
"currents state current_box_state = box_state_trace[i - 1, :] current_finger_state = finger_state_trace[i - 1,", "box_force_on_finger1 = calc_box_finger_wrench( box_state, finger1_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) box_forces", "\"\"\"Compute the contact force between a finger and the ground. args: finger_state: current", "= jnp.zeros(3) for i in range(4): # Get the friction force. Approximate ground", "): \"\"\"Compute the contact wrench between the box and the ground. args: box_state:", "in box frame R_WB = jnp.array( [[jnp.cos(theta_B), -jnp.sin(theta_B)], [jnp.sin(theta_B), jnp.cos(theta_B)]] ) R_BW =", "= v_WF[0] sticking_mask = jnp.abs(tangential_velocity) <= psi_s slipping_mask = jnp.logical_not(sticking_mask) * jnp.sign(tangential_velocity) mu", "velocity where slipping begins contact_k: spring constant of contact contact_d: damping constant of", "finger_state_initial: initial (x, z, vx, vz) state of the finger finger_state_desired_trace: N_steps x", "# Create arrays to store simulation traces box_state_trace = jnp.zeros((N_steps, 6)) finger_state_trace =", "(x, z, vx, vz) state of the finger finger1_state_desired_trace: N_steps x 2 array", "mu = sticking_mask * c * tangential_velocity + slipping_mask * mu_d * tangent", "of the box finger_state: current (x, z, vx, vz) state of the finger", "N_steps): # get currents state current_box_state = box_state_trace[i - 1, :] current_finger1_state =", "finger_state[:2] finger_vel_error = -finger_state[2:] finger_forces = finger_forces + finger_control_stiffness * finger_pos_error finger_forces =", "z_d) state of the finger finger_control_stiffness: the parameter for the finger stiffness control", "of tangential velocity in determining sticking friction psi_s: tangential velocity where slipping begins", "box_side_m, mu_d, c, psi_s, contact_k, contact_d ) finger1_forces += calc_finger_ground_force( finger1_state, mu_d, c,", "matrix for angle theta\"\"\" return jnp.array( [[jnp.cos(theta), -jnp.sin(theta)], [jnp.sin(theta), jnp.cos(theta)]] ) @jax.jit def", "p_WB = box_pose[:2] theta_B = box_pose[2] p_BF_W = p_WF - p_WB # Rotate", "i in range(4): # Get the friction force. Approximate ground force as a", "v_WF[0] sticking_mask = jnp.abs(tangential_velocity) <= psi_s slipping_mask = jnp.logical_not(sticking_mask) * jnp.sign(tangential_velocity) mu =", "the box frame p_WF = finger_pose p_WB = box_pose[:2] theta_B = box_pose[2] p_BF_W", "box_state, finger2_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) box_forces += finger2_wrench_on_box finger2_forces", "rotation matrix for angle theta\"\"\" return jnp.array( [[jnp.cos(theta), -jnp.sin(theta)], [jnp.sin(theta), jnp.cos(theta)]] ) @jax.jit", "range(4): # Get the friction force. 
Approximate ground force as a damped spring,", "box and ground while slipping c: coefficient of tangential velocity in determining sticking", "desired (x_d, z_d) state of the second finger finger_control_stiffness: the parameter for the", "the finger (x, z) into the box frame p_WF = finger_pose p_WB =", "right [1, -1], # bottom left [1, 1], # bottom right ] )", "@jax.jit def calc_box_finger_wrench( box_state, finger_state, box_size, mu_d, c, psi_s, contact_k, contact_d ): \"\"\"Compute", "jnp.logical_and( jnp.logical_not(right_or_up), jnp.logical_not(left_or_up) ) normal = normal_right * jnp.array([1.0, 0.0]) normal += normal_left", "contact_k, contact_d ): \"\"\"Compute the contact wrench between the box and the ground.", "normal + tangent_force # transform into the world frame contact_force_W = R_WB @", "finger1_vel_error = -finger1_state[2:] finger1_forces = finger1_forces + finger_control_stiffness * finger1_pos_error finger1_forces = finger1_forces", "###################################### # Build the derivatives matrix box_state_dot = jnp.zeros(6) finger1_state_dot = jnp.zeros(4) finger2_state_dot", "- contact_d * normal_velocity * ( phi_corner_ground[i] < 0 ) tangential_velocity = v_WC[i,", "corner point r = jnp.sqrt(2) * half_size v_BC = ( box_state[5] * r", "# Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger1_state_dot = finger1_state_dot.at[:2].add(finger1_state[2:]) finger2_state_dot = finger2_state_dot.at[:2].add(finger2_state[2:]) # Forces", ":], contact_force) ) return contact_wrench_on_box @jax.jit def calc_box_finger_wrench( box_state, finger_state, box_size, mu_d, c,", "of the second finger finger2_state_desired: desired (x_d, z_d) state of the second finger", "box_single_finger_step( current_box_state, current_finger_state, current_finger_state_desired, finger_control_stiffness, ) # Save box_state_trace = box_state_trace.at[i, :].set(next_box_state) finger_state_trace", "new_finger1_state, new_finger2_state def box_two_finger_simulate( box_state_initial, finger1_state_initial, finger1_state_desired_trace, finger2_state_initial, finger2_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate", "finger2_state, finger2_state_desired, finger_control_stiffness, ): \"\"\"Compute a single discrete-time update for box manipulation with", "= finger1_state_desired_trace[i - 1] current_finger2_state = finger2_state_trace[i - 1, :] current_finger2_state_desired = finger2_state_desired_trace[i", "- contact force on finger in x and z. 
\"\"\" # Contact point", "z_d) state of the finger over time finger2_state_initial: initial (x, z, vx, vz)", "box-finger system with one finger, starting at the given initial states and applying", "second finger finger_control_stiffness: the parameter for the finger stiffness control returns: new_box_state, new_finger_state", "finger1_state_dot new_finger2_state = finger2_state + dt * finger2_state_dot return new_box_state, new_finger1_state, new_finger2_state def", "Rotate p_BF_W by -theta about the z axis to get position in box", "finger over time finger_control_stiffness: the parameter for the finger stiffness control N_steps: int", "box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger_state_dot = finger_state_dot.at[:2].add(finger_state[2:]) # Forces box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg)", "current_finger_state, current_finger_state_desired, finger_control_stiffness, ) # Save box_state_trace = box_state_trace.at[i, :].set(next_box_state) finger_state_trace = finger_state_trace.at[i,", "(jnp.linalg.norm(tangential_velocity + 1e-3) + 1e-3) # Get signed distance phi_finger_box = box_finger_signed_distance( box_state[:3],", "###################################### finger1_forces = jnp.zeros(2) finger2_forces = jnp.zeros(2) box_forces = jnp.zeros(3) # Gravitational force", "position in box frame R_WB = jnp.array( [[jnp.cos(theta_B), -jnp.sin(theta_B)], [jnp.sin(theta_B), jnp.cos(theta_B)]] ) R_BW", "box_state_initial, finger1_state_initial, finger1_state_desired_trace, finger2_state_initial, finger2_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate the evolution of the", "2.0) # phi = signed distance. phi = jnp.minimum(0.0, jnp.maximum(x_dist, z_dist)) phi =", "box_state_trace.at[i, :].set(next_box_state) finger1_state_trace = finger1_state_trace.at[i, :].set(next_finger1_state) finger2_state_trace = finger2_state_trace.at[i, :].set(next_finger2_state) # Return the", "box_wrench.at[2].add(jnp.cross(p_BF_W, -contact_force_W)) finger_forces = contact_force_W return box_wrench, finger_forces @jax.jit def box_single_finger_step( box_state, finger_state,", "simulation traces box_state_trace = jnp.zeros((N_steps, 6)) finger1_state_trace = jnp.zeros((N_steps, 4)) finger2_state_trace = jnp.zeros((N_steps,", "< 0) tangential_velocity = v_WF[0] sticking_mask = jnp.abs(tangential_velocity) <= psi_s slipping_mask = jnp.logical_not(sticking_mask)", "current_finger1_state = finger1_state_trace[i - 1, :] current_finger1_state_desired = finger1_state_desired_trace[i - 1] current_finger2_state =", "phi_corner_ground[i] < 0 ) tangential_velocity = v_WC[i, 0] sticking_mask = jnp.abs(tangential_velocity) <= psi_s", "force to the box and finger box_wrench = jnp.zeros(3) box_wrench = box_wrench.at[:2].add(-contact_force_W) box_wrench", "(x, z, vx, vz) state of the finger finger2_state_desired_trace: N_steps x 2 array", "calc_finger_ground_force( finger1_state, mu_d, c, psi_s, contact_k, contact_d ) finger2_forces += calc_finger_ground_force( finger2_state, mu_d,", "finger2_state + dt * finger2_state_dot return new_box_state, new_finger1_state, new_finger2_state def box_two_finger_simulate( box_state_initial, finger1_state_initial,", "def calc_box_ground_wrench(box_state, box_size, mu_d, c, psi_s, contact_k, contact_d): \"\"\"Compute the contact wrench between", "- contact wrench on box in x, z, and theta. 


@jax.jit
def box_single_finger_step(
    box_state, finger_state, finger_state_desired, finger_control_stiffness
):
    """Compute a single discrete-time update for box manipulation with one
    finger, using the penalty method for contact modelling with a simplified
    Coulomb friction model

    args:
        box_state: current (x, z, theta, vx, vz, omega) state of the box
        finger_state: current (x, z, vx, vz) state of the finger
        finger_state_desired: desired (x_d, z_d) state of the finger
        finger_control_stiffness: the parameter for the finger stiffness control
    returns:
        new_box_state, new_finger_state
    """
    ######################################
    # define parameters of the simulation
    ######################################
    # Box properties
    box_mass_kg = 1.0
    box_side_m = 0.5
    box_inertia = 1 / 6 * box_mass_kg * box_side_m ** 2

    # Finger properties
    finger_mass_kg = 0.1
    finger_control_damping = 2

    # Contact properties
    mu_d = 0.7
    c = 2.0
    psi_s = mu_d / c
    contact_k = 1000
    contact_d = 2 * jnp.sqrt(box_mass_kg * contact_k)  # critical damping

    # General properties
    g = 9.81
    dt = 0.001  # seconds per step

    ######################################
    # Get forces on each body
    ######################################
    finger_forces = jnp.zeros(2)
    box_forces = jnp.zeros(3)

    # Gravitational force on each body
    finger_forces = finger_forces.at[1].add(-g * finger_mass_kg)
    box_forces = box_forces.at[1].add(-g * box_mass_kg)

    # Control forces on finger
    finger_pos_error = finger_state_desired - finger_state[:2]
    finger_vel_error = -finger_state[2:]
    finger_forces = finger_forces + finger_control_stiffness * finger_pos_error
    finger_forces = finger_forces + finger_control_damping * finger_vel_error

    # Contact forces from the ground.
    box_forces += calc_box_ground_wrench(
        box_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d
    )
    finger_forces += calc_finger_ground_force(
        finger_state, mu_d, c, psi_s, contact_k, contact_d
    )

    # Contact forces between box and finger
    finger_wrench_on_box, box_force_on_finger = calc_box_finger_wrench(
        box_state, finger_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d
    )
    box_forces += finger_wrench_on_box
    finger_forces += box_force_on_finger

    ######################################
    # Numerically integrate
    ######################################
    # Build the derivatives matrix
    box_state_dot = jnp.zeros(6)
    finger_state_dot = jnp.zeros(4)
    # Velocities
    box_state_dot = box_state_dot.at[:3].add(box_state[3:])
    finger_state_dot = finger_state_dot.at[:2].add(finger_state[2:])
    # Forces
    box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg)
    finger_state_dot = finger_state_dot.at[2:].add(finger_forces / finger_mass_kg)
    # Torques
    box_state_dot = box_state_dot.at[5].add(box_forces[2] / box_inertia)

    # Integrate
    new_box_state = box_state + dt * box_state_dot
    new_finger_state = finger_state + dt * finger_state_dot

    return new_box_state, new_finger_state


def box_single_finger_simulate(
    box_state_initial,
    finger_state_initial,
    finger_state_desired_trace,
    finger_control_stiffness,
    N_steps,
):
    """Simulate the evolution of the box-finger system with one finger,
    starting at the given initial states and applying the specified control
    inputs

    args:
        box_state_initial: initial (x, z, theta, vx, vz, omega) state of the box
        finger_state_initial: initial (x, z, vx, vz) state of the finger
        finger_state_desired_trace: N_steps x 2 array of desired (x_d, z_d)
            state of the finger over time
        finger_control_stiffness: the parameter for the finger stiffness control
        N_steps: int specifying the number of discrete time steps to simulate
    returns:
        box_state_trace, finger_state_trace
    """
    # Create arrays to store simulation traces
    box_state_trace = jnp.zeros((N_steps, 6))
    finger_state_trace = jnp.zeros((N_steps, 4))

    # Store the initial conditions
    box_state_trace = box_state_trace.at[0, :].set(box_state_initial)
    finger_state_trace = finger_state_trace.at[0, :].set(finger_state_initial)

    # Simulate
    for i in range(1, N_steps):
        # get current state
        current_box_state = box_state_trace[i - 1, :]
        current_finger_state = finger_state_trace[i - 1, :]
        current_finger_state_desired = finger_state_desired_trace[i - 1]

        # get next state
        next_box_state, next_finger_state = box_single_finger_step(
            current_box_state,
            current_finger_state,
            current_finger_state_desired,
            finger_control_stiffness,
        )

        # Save
        box_state_trace = box_state_trace.at[i, :].set(next_box_state)
        finger_state_trace = finger_state_trace.at[i, :].set(next_finger_state)

    # Return the simulated values
    return box_state_trace, finger_state_trace
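

# A minimal end-to-end sketch (hypothetical helper, not from the original
# module): the rollout above is pure jnp, so jax.grad can differentiate a loss
# on the final box pose with respect to the control stiffness. Target, horizon,
# and initial states are arbitrary; the Python loop unrolls under tracing, so
# keep N small for a quick test.
def _demo_rollout_gradient():
    N = 50
    box0 = jnp.array([0.0, 0.25, 0.0, 0.0, 0.0, 0.0])
    finger0 = jnp.array([-0.5, 0.1, 0.0, 0.0])
    desired = jnp.tile(jnp.array([-0.3, 0.1]), (N, 1))  # constant setpoint

    def loss(stiffness):
        box_trace, _ = box_single_finger_simulate(box0, finger0, desired, stiffness, N)
        # Squared distance from the final box position to a target position
        return jnp.sum((box_trace[-1, :2] - jnp.array([0.5, 0.25])) ** 2)

    return jax.grad(loss)(10.0)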
\"\"\" # Start by finding", "finger2_forces += box_force_on_finger2 ###################################### # Numerically integrate ###################################### # Build the derivatives matrix", "in box frame # Transform to world frame v_WC = (R_WB @ v_BC.T).T", "p_BF = R_BW @ p_BF_W # Now get the signed distance x_dist =", "* box_side_m ** 2 # Finger properties finger_mass_kg = 0.1 finger_control_damping = 2", "-1], # bottom left [1, 1], # bottom right ] ) ) #", "into the box frame p_WF = finger_pose p_WB = box_pose[:2] theta_B = box_pose[2]", "[1, -1], # bottom left [1, 1], # bottom right ] ) )", "c, psi_s, contact_k, contact_d ) finger1_forces += calc_finger_ground_force( finger1_state, mu_d, c, psi_s, contact_k,", "of desired (x_d, z_d) state of the finger over time finger_control_stiffness: the parameter", "vector of the contact in the box frame right_or_up = p_BF[1] > -p_BF[0]", "contact_k, contact_d ) finger1_forces += calc_finger_ground_force( finger1_state, mu_d, c, psi_s, contact_k, contact_d )", "N_steps: int specifying the number of discrete time steps to simulate returns: box_state_trace,", "psi_s, contact_k, contact_d ) # Contact forces between box and fingers finger1_wrench_on_box, box_force_on_finger1", "N_steps x 2 array of desired (x_d, z_d) state of the finger over", "c, psi_s, contact_k, contact_d): \"\"\"Compute the contact wrench between the box and the", "= box_state_trace[i - 1, :] current_finger_state = finger_state_trace[i - 1, :] current_finger_state_desired =", "vx, vz, omega) state of the box finger1_state_initial: initial (x, z, vx, vz)", "new_finger_state \"\"\" ###################################### # define parameters of the simulation ###################################### # Box properties", "friction between box and ground while slipping c: coefficient of tangential velocity in", "finger_pose p_WB = box_pose[:2] theta_B = box_pose[2] p_BF_W = p_WF - p_WB #", "contact in the box frame right_or_up = p_BF[1] > -p_BF[0] left_or_up = p_BF[1]", "p_BF[0] normal_right = jnp.logical_and(right_or_up, jnp.logical_not(left_or_up)) normal_up = jnp.logical_and(right_or_up, left_or_up) normal_left = jnp.logical_and(jnp.logical_not(right_or_up), left_or_up)", "\"\"\"Return the 2D rotation matrix for angle theta\"\"\" return jnp.array( [[jnp.cos(theta), -jnp.sin(theta)], [jnp.sin(theta),", "normal += normal_left * jnp.array([-1.0, 0.0]) normal += normal_up * jnp.array([0.0, 1.0]) normal", "the finger args: box_pose: current (x, z, theta) state of the box finger_pose:", "z, vx, vz) state of the finger finger_state_desired: desired (x_d, z_d) state of", "contact pt in box frame v_contactF_B = v_BF - v_Bcontact # Get the", "jnp.maximum(x_dist, z_dist)) phi = phi + jnp.linalg.norm( jnp.maximum(jnp.array([1e-3, 1e-3]), jnp.array([x_dist, z_dist])) ) return", "vz) state of the second finger finger2_state_desired: desired (x_d, z_d) state of the", "half_size v_BC = ( box_state[5] * r * jnp.array( [ [-1, -1], #", "Simulate for i in range(1, N_steps): # get currents state current_box_state = box_state_trace[i", "<= psi_s slipping_mask = jnp.logical_not(sticking_mask) * jnp.sign(tangential_velocity) mu = sticking_mask * c *", "used for ground contact normal_force = -contact_k * phi_finger_box # scalar, in normal", "args: finger_state: current (x, z, theta, vx, vz, omega) state of the box", "simulation traces box_state_trace = jnp.zeros((N_steps, 6)) finger_state_trace = jnp.zeros((N_steps, 4)) # Store the", "of the box finger1_state_initial: initial (x, z, vx, vz) state of 
the finger", "= calc_box_finger_wrench( box_state, finger1_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) box_forces +=", "box frame v_Bcontact = box_state[5] * jnp.array([[0, -1], [1, 0]]) @ p_BF #", "def calc_finger_ground_force(finger_state, mu_d, c, psi_s, contact_k, contact_d): \"\"\"Compute the contact force between a", "normal_force = normal_force - contact_d * normal_velocity * (phi_finger_ground < 0) tangential_velocity =", "model from eq 21 and 22 in # https://arxiv.org/pdf/2109.05143.pdf, but with damping. normal_velocity", "Store the initial conditions box_state_trace = box_state_trace.at[0, :].set(box_state_initial) finger_state_trace = finger_state_trace.at[0, :].set(finger_state_initial) #", "<= psi_s slipping_mask = jnp.logical_not(sticking_mask) mu = sticking_mask * c * tangential_velocity +", "finger_forces = finger_forces + finger_control_damping * finger_vel_error # Contact forces from the ground.", "side length of box returns: float signed distance \"\"\" # Credit to this", "# Get velocity of the finger in box frame v_WF = finger_state[2:] v_WB", "= jnp.minimum(0, phi_finger_box) # Use the same simplified friction model as used for", "next_box_state, next_finger_state = box_single_finger_step( current_box_state, current_finger_state, current_finger_state_desired, finger_control_stiffness, ) # Save box_state_trace =", "in normal direction normal_force = normal_force - contact_d * normal_velocity * (phi_finger_box <", "# Get velocity of contact point in box frame v_Bcontact = box_state[5] *", "@ contact_force_B # Add the contact force to the box and finger box_wrench", "# stackoverflow.com/questions/30545052/calculate-signed-distance-between-point-and- # rectangle # First transform the finger (x, z) into the", "< 0 ) tangential_velocity = v_WC[i, 0] sticking_mask = jnp.abs(tangential_velocity) <= psi_s slipping_mask", "###################################### # Get forces on each body ###################################### finger_forces = jnp.zeros(2) box_forces =", "system with one finger, starting at the given initial states and applying the", "current (x, z) state of the finger box_size: side length of box returns:", "* finger2_vel_error # Contact forces from ground. box_forces += calc_box_ground_wrench( box_state, box_side_m, mu_d,", "@ p_BF # Get velocity of finger relative to contact pt in box", "@jax.jit def box_single_finger_step( box_state, finger_state, finger_state_desired, finger_control_stiffness, ): \"\"\"Compute a single discrete-time update", "ground. 
@jax.jit
def rotation_matrix(theta):
    """Return the 2D rotation matrix for angle theta"""
    return jnp.array(
        [[jnp.cos(theta), -jnp.sin(theta)], [jnp.sin(theta), jnp.cos(theta)]]
    )


@jax.jit
def calc_finger_ground_force(finger_state, mu_d, c, psi_s, contact_k, contact_d):
    """Compute the contact force between a finger and the ground.

    args:
        finger_state: current (x, z, vx, vz) state of the finger
        mu_d: coefficient of friction between box and ground while slipping
        c: coefficient of tangential velocity in determining sticking friction
        psi_s: tangential velocity where slipping begins
        contact_k: spring constant of contact
        contact_d: damping constant of contact
    returns:
        contact force in x and z
    """
    # Get the position and velocity of the finger in the world frame
    p_WF = finger_state[:2]
    v_WF = finger_state[2:]

    # Get penetration into ground
    phi_finger_ground = jnp.minimum(0, p_WF[1])

    # Get the contact forces. Approximate ground force as a damped spring, as in
    # the simplified friction model from eq 21 and 22 in
    # https://arxiv.org/pdf/2109.05143.pdf, but with damping.
    normal_velocity = v_WF[1]
    normal_force = -contact_k * phi_finger_ground
    normal_force = normal_force - contact_d * normal_velocity * (phi_finger_ground < 0)

    tangential_velocity = v_WF[0]
    sticking_mask = jnp.abs(tangential_velocity) <= psi_s
    slipping_mask = jnp.logical_not(sticking_mask) * jnp.sign(tangential_velocity)
    mu = sticking_mask * c * tangential_velocity + slipping_mask * mu_d
    tangent_force = -mu * normal_force

    contact_force = jnp.array([tangent_force, normal_force]).reshape(2)
    return contact_force
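# Editor's note: a small sketch of the stick/slip friction law used above, added
# for illustration and not part of the original file; mu_d, c, and the sample
# velocities below are assumed values. For |v_t| <= psi_s the effective friction
# coefficient grows linearly as c * v_t, and beyond psi_s it saturates at
# mu_d * sign(v_t).
def _demo_friction_coefficient(mu_d=0.7, c=2.0):
    psi_s = mu_d / c
    v_t = jnp.array([-1.0, -0.1, 0.0, 0.1, 1.0])  # sample tangential velocities
    sticking = jnp.abs(v_t) <= psi_s
    slipping = jnp.logical_not(sticking) * jnp.sign(v_t)
    mu = sticking * c * v_t + slipping * mu_d
    # expect mu close to [-0.7, -0.2, 0.0, 0.2, 0.7]
    return v_t, mu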
@jax.jit
def calc_box_ground_wrench(box_state, box_size, mu_d, c, psi_s, contact_k, contact_d):
    """Compute the contact wrench between the box and the ground.

    args:
        box_state: current (x, z, theta, vx, vz, omega) state of the box
        box_size: float indicating the side length of the box
        mu_d: coefficient of friction between box and ground while slipping
        c: coefficient of tangential velocity in determining sticking friction
        psi_s: tangential velocity where slipping begins
        contact_k: spring constant of contact
        contact_d: damping constant of contact
    returns:
        contact wrench in x, z, and theta.
    """
    # Start by finding any box corner points that intersect the ground at z = 0
    half_size = box_size / 2.0
    p_BC = jnp.array(
        [
            [-half_size, half_size],  # top left
            [half_size, half_size],  # top right
            [-half_size, -half_size],  # bottom left
            [half_size, -half_size],  # bottom right
        ]
    )  # corner points in box frame
    # Transform into world frame
    R_WB = rotation_matrix(box_state[2])
    p_BC_W = (R_WB @ p_BC.T).T
    p_WC = p_BC_W + jnp.tile(box_state[:2], [4, 1])

    # Also find the velocities of each corner point
    r = jnp.sqrt(2) * half_size
    v_BC = (
        box_state[5]
        * r
        * jnp.array(
            [
                [-1, -1],  # top left
                [-1, 1],  # top right
                [1, -1],  # bottom left
                [1, 1],  # bottom right
            ]
        )
    )  # corner point velocities in box frame
    # Transform to world frame
    v_WC = (R_WB @ v_BC.T).T + jnp.tile(box_state[3:5], [4, 1])

    # Find any that have negative z: min(0, signed distance)
    phi_corner_ground = jnp.minimum(jnp.zeros(4), p_WC[:, 1])

    # For each corner, sum up the forces and torques due to penetration with the ground
    contact_wrench_on_box = jnp.zeros(3)
    for i in range(4):
        # Get the friction force. Approximate ground force as a damped spring, as in
        # the simplified friction model from eq 21 and 22 in
        # https://arxiv.org/pdf/2109.05143.pdf, but with damping.
        normal_velocity = v_WC[i, 1]
        normal_force = -contact_k * phi_corner_ground[i]
        normal_force = normal_force - contact_d * normal_velocity * (
            phi_corner_ground[i] < 0
        )

        tangential_velocity = v_WC[i, 0]
        sticking_mask = jnp.abs(tangential_velocity) <= psi_s
        slipping_mask = jnp.logical_not(sticking_mask) * jnp.sign(tangential_velocity)
        mu = sticking_mask * c * tangential_velocity + slipping_mask * mu_d
        tangent_force = -mu * normal_force
        contact_force = jnp.array([tangent_force, normal_force])

        # Add the friction force to the box
        contact_wrench_on_box = contact_wrench_on_box.at[:2].add(contact_force)
        # Also add the torque from this interaction
        contact_wrench_on_box = contact_wrench_on_box.at[2].add(
            jnp.cross(p_BC_W[i, :], contact_force)
        )

    return contact_wrench_on_box
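# Editor's note: a minimal usage sketch for the ground-contact wrench, added for
# illustration and not part of the original file. The contact parameters below
# mirror the defaults hard-coded later in box_single_finger_step; the 5 mm
# penetration is an assumed value. A box resting with slight penetration should
# see an upward normal force and, by symmetry, no net torque.
def _demo_box_ground_wrench():
    box_size = 0.5
    mu_d, c = 0.7, 2.0
    psi_s = mu_d / c
    contact_k = 1000
    contact_d = 2 * jnp.sqrt(1.0 * contact_k)
    # Box centered so its bottom face sits 5 mm below the ground, at rest
    box_state = jnp.array([0.0, box_size / 2.0 - 0.005, 0.0, 0.0, 0.0, 0.0])
    wrench = calc_box_ground_wrench(
        box_state, box_size, mu_d, c, psi_s, contact_k, contact_d
    )
    # expect roughly [0, +10, 0] for 5 mm penetration at contact_k = 1000
    return wrench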
@jax.jit
def calc_box_finger_wrench(
    box_state, finger_state, box_size, mu_d, c, psi_s, contact_k, contact_d
):
    """Compute the contact wrench between the box and a finger.

    args:
        box_state: current (x, z, theta, vx, vz, omega) state of the box
        finger_state: current (x, z, vx, vz) state of the finger
        box_size: float indicating the side length of the box
        mu_d: coefficient of friction between box and ground while slipping
        c: coefficient of tangential velocity in determining sticking friction
        psi_s: tangential velocity where slipping begins
        contact_k: spring constant of contact
        contact_d: damping constant of contact
    returns:
        Tuple of
        - contact wrench on box in x, z, and theta
        - contact force on finger in x and z.
    """
    # Contact point is just the finger point in the box frame
    p_WF = finger_state[:2]
    p_WB = box_state[:2]
    p_BF_W = p_WF - p_WB
    R_WB = rotation_matrix(box_state[2])
    p_BF = R_WB.T @ p_BF_W

    # Get velocity of the finger in box frame
    v_WF = finger_state[2:]
    v_WB = box_state[3:5]
    v_BF_W = v_WF - v_WB
    v_BF = R_WB.T @ v_BF_W

    # Get velocity of contact point in box frame
    v_Bcontact = box_state[5] * jnp.array([[0, -1], [1, 0]]) @ p_BF
    # Get velocity of finger relative to contact pt in box frame
    v_contactF_B = v_BF - v_Bcontact

    # Get the normal vector of the contact in the box frame
    right_or_up = p_BF[1] > -p_BF[0]
    left_or_up = p_BF[1] > p_BF[0]
    normal_right = jnp.logical_and(right_or_up, jnp.logical_not(left_or_up))
    normal_up = jnp.logical_and(right_or_up, left_or_up)
    normal_left = jnp.logical_and(jnp.logical_not(right_or_up), left_or_up)
    normal_down = jnp.logical_and(
        jnp.logical_not(right_or_up), jnp.logical_not(left_or_up)
    )
    normal = normal_right * jnp.array([1.0, 0.0])
    normal += normal_left * jnp.array([-1.0, 0.0])
    normal += normal_up * jnp.array([0.0, 1.0])
    normal += normal_down * jnp.array([0.0, -1.0])

    # Get the tangent vector, which is orthogonal to the normal vector
    # and points in the same direction as the relative velocity
    tangential_velocity = (
        v_contactF_B - v_contactF_B.dot(normal) * normal
    )  # relative velocity in tangent direction
    normal_velocity = v_contactF_B.dot(normal)  # scalar, along the normal vector
    tangent = tangential_velocity / (jnp.linalg.norm(tangential_velocity + 1e-3) + 1e-3)

    # Get signed distance
    phi_finger_box = box_finger_signed_distance(
        box_state[:3], finger_state[:2], box_size
    )
    # Clip to only consider negative values
    phi_finger_box = jnp.minimum(0, phi_finger_box)

    # Use the same simplified friction model as used for ground contact
    normal_force = -contact_k * phi_finger_box  # scalar, in normal direction
    normal_force = normal_force - contact_d * normal_velocity * (phi_finger_box < 0)

    sticking_mask = jnp.linalg.norm(tangential_velocity + 1e-3) <= psi_s
    slipping_mask = jnp.logical_not(sticking_mask)
    mu = sticking_mask * c * tangential_velocity + slipping_mask * mu_d * tangent
    tangent_force = -mu * normal_force  # vector!

    # Sum up the contact forces in the box frame
    contact_force_B = normal_force * normal + tangent_force
    # transform into the world frame
    contact_force_W = R_WB @ contact_force_B

    # Add the contact force to the box and finger
    box_wrench = jnp.zeros(3)
    box_wrench = box_wrench.at[:2].add(-contact_force_W)
    box_wrench = box_wrench.at[2].add(jnp.cross(p_BF_W, -contact_force_W))

    finger_forces = contact_force_W

    return box_wrench, finger_forces
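# Editor's note: an illustrative check, not part of the original file; the box
# pose, finger placement, and contact parameters are assumed values. It shows
# the action/reaction bookkeeping of calc_box_finger_wrench: the force returned
# for the finger is equal and opposite to the force part of the box wrench.
def _demo_box_finger_reaction():
    box_size = 0.5
    box_state = jnp.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0])
    finger_state = jnp.array([-0.24, 1.0, 0.0, 0.0])  # just inside the left face
    mu_d, c = 0.7, 2.0
    psi_s = mu_d / c
    contact_k = 1000
    contact_d = 2 * jnp.sqrt(1.0 * contact_k)
    box_wrench, finger_force = calc_box_finger_wrench(
        box_state, finger_state, box_size, mu_d, c, psi_s, contact_k, contact_d
    )
    # expect approximately [0, 0]
    return box_wrench[:2] + finger_force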
@jax.jit
def box_single_finger_step(
    box_state,
    finger_state,
    finger_state_desired,
    finger_control_stiffness,
):
    """Compute a single discrete-time update for box manipulation with one finger,
    using the penalty method for contact modelling with a simplified Coulomb
    friction model

    args:
        box_state: current (x, z, theta, vx, vz, omega) state of the box
        finger_state: current (x, z, vx, vz) state of the finger
        finger_state_desired: desired (x_d, z_d) state of the finger
        finger_control_stiffness: the parameter for the finger stiffness control
    returns:
        new_box_state, new_finger_state
    """
    ######################################
    # define parameters of the simulation
    ######################################
    # Box properties
    box_mass_kg = 1.0
    box_side_m = 0.5
    box_inertia = 1 / 6 * box_mass_kg * box_side_m ** 2

    # Finger properties
    finger_mass_kg = 0.1
    finger_control_damping = 2

    # Contact properties
    mu_d = 0.7
    c = 2.0
    psi_s = mu_d / c
    contact_k = 1000
    contact_d = 2 * jnp.sqrt(box_mass_kg * contact_k)  # critical damping

    # General properties
    g = 9.81
    dt = 0.001  # seconds per step

    ######################################
    # Get forces on each body
    ######################################
    finger_forces = jnp.zeros(2)
    box_forces = jnp.zeros(3)

    # Gravitational force on each body
    finger_forces = finger_forces.at[1].add(-g * finger_mass_kg)
    box_forces = box_forces.at[1].add(-g * box_mass_kg)

    # Control forces on finger
    finger_pos_error = finger_state_desired - finger_state[:2]
    finger_vel_error = -finger_state[2:]
    finger_forces = finger_forces + finger_control_stiffness * finger_pos_error
    finger_forces = finger_forces + finger_control_damping * finger_vel_error

    # Contact forces from the ground.
    box_forces += calc_box_ground_wrench(
        box_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d
    )
    finger_forces += calc_finger_ground_force(
        finger_state, mu_d, c, psi_s, contact_k, contact_d
    )

    # Contact forces between box and finger
    finger_wrench_on_box, box_force_on_finger = calc_box_finger_wrench(
        box_state, finger_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d
    )
    box_forces += finger_wrench_on_box
    finger_forces += box_force_on_finger

    ######################################
    # Numerically integrate
    ######################################
    # Build the derivatives matrix
    box_state_dot = jnp.zeros(6)
    finger_state_dot = jnp.zeros(4)

    # Velocities
    box_state_dot = box_state_dot.at[:3].add(box_state[3:])
    finger_state_dot = finger_state_dot.at[:2].add(finger_state[2:])

    # Forces
    box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg)
    finger_state_dot = finger_state_dot.at[2:].add(finger_forces / finger_mass_kg)

    # Torques
    box_state_dot = box_state_dot.at[5].add(box_forces[2] / box_inertia)

    # Integrate
    new_box_state = box_state + dt * box_state_dot
    new_finger_state = finger_state + dt * finger_state_dot

    return new_box_state, new_finger_state
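# Editor's note: a short sketch, not part of the original file, of why the step
# function is written in pure JAX: jax.jacfwd can differentiate the next state
# with respect to the commanded finger position. All states and the stiffness
# below are assumed values chosen only for illustration.
def _demo_step_gradient():
    box_state = jnp.array([0.0, 0.25, 0.0, 0.0, 0.0, 0.0])
    finger_state = jnp.array([-0.5, 0.1, 0.0, 0.0])
    finger_state_desired = jnp.array([-0.4, 0.1])
    stiffness = 10.0
    # Jacobians of (new_box_state, new_finger_state) w.r.t. finger_state_desired
    jac_box, jac_finger = jax.jacfwd(box_single_finger_step, argnums=2)(
        box_state, finger_state, finger_state_desired, stiffness
    )
    # expect shapes (6, 2) and (4, 2)
    return jac_box.shape, jac_finger.shape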
def box_single_finger_simulate(
    box_state_initial,
    finger_state_initial,
    finger_state_desired_trace,
    finger_control_stiffness,
    N_steps,
):
    """Simulate the evolution of the box-finger system with one finger, starting at
    the given initial states and applying the specified control inputs

    args:
        box_state_initial: initial (x, z, theta, vx, vz, omega) state of the box
        finger_state_initial: initial (x, z, vx, vz) state of the finger
        finger_state_desired_trace: N_steps x 2 array of desired (x_d, z_d) state
            of the finger over time
        finger_control_stiffness: the parameter for the finger stiffness control
        N_steps: int specifying the number of discrete time steps to simulate
    returns:
        box_state_trace, finger_state_trace
    """
    # Create arrays to store simulation traces
    box_state_trace = jnp.zeros((N_steps, 6))
    finger_state_trace = jnp.zeros((N_steps, 4))

    # Store the initial conditions
    box_state_trace = box_state_trace.at[0, :].set(box_state_initial)
    finger_state_trace = finger_state_trace.at[0, :].set(finger_state_initial)

    # Simulate
    for i in range(1, N_steps):
        # get current state
        current_box_state = box_state_trace[i - 1, :]
        current_finger_state = finger_state_trace[i - 1, :]
        current_finger_state_desired = finger_state_desired_trace[i - 1]

        # get next state
        next_box_state, next_finger_state = box_single_finger_step(
            current_box_state,
            current_finger_state,
            current_finger_state_desired,
            finger_control_stiffness,
        )

        # Save
        box_state_trace = box_state_trace.at[i, :].set(next_box_state)
        finger_state_trace = finger_state_trace.at[i, :].set(next_finger_state)

    # Return the simulated values
    return box_state_trace, finger_state_trace
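# Editor's note: a minimal end-to-end usage sketch, not part of the original
# file; the initial states, horizon, and stiffness are assumed values. It rolls
# the single-finger simulation forward with a constant desired finger position
# and differentiates a scalar loss on the final box position with respect to the
# desired trace. The Python loop above unrolls during tracing, so long horizons
# can be slow to differentiate.
def _demo_simulate_and_grad(N_steps=50):
    box_state_0 = jnp.array([0.0, 0.25, 0.0, 0.0, 0.0, 0.0])
    finger_state_0 = jnp.array([-0.5, 0.1, 0.0, 0.0])
    desired_trace = jnp.tile(jnp.array([-0.2, 0.1]), (N_steps, 1))

    def loss(trace):
        box_trace, _ = box_single_finger_simulate(
            box_state_0, finger_state_0, trace, 10.0, N_steps
        )
        return box_trace[-1, 0]  # final box x position

    # gradient has the same shape as desired_trace
    return jax.grad(loss)(desired_trace)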
@jax.jit
def box_two_finger_step(
    box_state,
    finger1_state,
    finger1_state_desired,
    finger2_state,
    finger2_state_desired,
    finger_control_stiffness,
):
    """Compute a single discrete-time update for box manipulation with two fingers,
    using the penalty method for contact modelling with a simplified Coulomb
    friction model

    args:
        box_state: current (x, z, theta, vx, vz, omega) state of the box
        finger1_state: current (x, z, vx, vz) state of the first finger
        finger1_state_desired: desired (x_d, z_d) state of the first finger
        finger2_state: current (x, z, vx, vz) state of the second finger
        finger2_state_desired: desired (x_d, z_d) state of the second finger
        finger_control_stiffness: the parameter for the finger stiffness control
    returns:
        new_box_state, new_finger1_state, new_finger2_state
    """
    ######################################
    # define parameters of the simulation
    ######################################
    # Box properties
    box_mass_kg = 1.0
    box_side_m = 0.5
    box_inertia = 1 / 6 * box_mass_kg * box_side_m ** 2

    # Finger properties
    finger_mass_kg = 0.1
    finger_control_damping = 2

    # Contact properties
    mu_d = 0.7
    c = 2.0
    psi_s = mu_d / c
    contact_k = 1000
    contact_d = 2 * jnp.sqrt(box_mass_kg * contact_k)  # critical damping

    # General properties
    g = 9.81
    dt = 0.001  # seconds per step

    ######################################
    # Get forces on each body
    ######################################
    finger1_forces = jnp.zeros(2)
    finger2_forces = jnp.zeros(2)
    box_forces = jnp.zeros(3)

    # Gravitational force on each body
    finger1_forces = finger1_forces.at[1].add(-g * finger_mass_kg)
    finger2_forces = finger2_forces.at[1].add(-g * finger_mass_kg)
    box_forces = box_forces.at[1].add(-g * box_mass_kg)

    # Control forces on fingers
    finger1_pos_error = finger1_state_desired - finger1_state[:2]
    finger1_vel_error = -finger1_state[2:]
    finger1_forces = finger1_forces + finger_control_stiffness * finger1_pos_error
    finger1_forces = finger1_forces + finger_control_damping * finger1_vel_error

    finger2_pos_error = finger2_state_desired - finger2_state[:2]
    finger2_vel_error = -finger2_state[2:]
    finger2_forces = finger2_forces + finger_control_stiffness * finger2_pos_error
    finger2_forces = finger2_forces + finger_control_damping * finger2_vel_error

    # Contact forces from ground.
    box_forces += calc_box_ground_wrench(
        box_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d
    )
    finger1_forces += calc_finger_ground_force(
        finger1_state, mu_d, c, psi_s, contact_k, contact_d
    )
    finger2_forces += calc_finger_ground_force(
        finger2_state, mu_d, c, psi_s, contact_k, contact_d
    )

    # Contact forces between box and fingers
    finger1_wrench_on_box, box_force_on_finger1 = calc_box_finger_wrench(
        box_state, finger1_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d
    )
    box_forces += finger1_wrench_on_box
    finger1_forces += box_force_on_finger1

    finger2_wrench_on_box, box_force_on_finger2 = calc_box_finger_wrench(
        box_state, finger2_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d
    )
    box_forces += finger2_wrench_on_box
    finger2_forces += box_force_on_finger2

    ######################################
    # Numerically integrate
    ######################################
    # Build the derivatives matrix
    box_state_dot = jnp.zeros(6)
    finger1_state_dot = jnp.zeros(4)
    finger2_state_dot = jnp.zeros(4)

    # Velocities
    box_state_dot = box_state_dot.at[:3].add(box_state[3:])
    finger1_state_dot = finger1_state_dot.at[:2].add(finger1_state[2:])
    finger2_state_dot = finger2_state_dot.at[:2].add(finger2_state[2:])

    # Forces
    box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg)
    finger1_state_dot = finger1_state_dot.at[2:].add(finger1_forces / finger_mass_kg)
    finger2_state_dot = finger2_state_dot.at[2:].add(finger2_forces / finger_mass_kg)

    # Torques
    box_state_dot = box_state_dot.at[5].add(box_forces[2] / box_inertia)

    # Integrate
    new_box_state = box_state + dt * box_state_dot
    new_finger1_state = finger1_state + dt * finger1_state_dot
    new_finger2_state = finger2_state + dt * finger2_state_dot

    return new_box_state, new_finger1_state, new_finger2_state
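# Editor's note: an illustrative sketch, not part of the original file, showing
# that the two-finger step can be batched with jax.vmap over candidate commands
# for the first finger; all states and parameter values below are assumptions.
def _demo_two_finger_step_vmap(batch_size=8):
    box_state = jnp.array([0.0, 0.25, 0.0, 0.0, 0.0, 0.0])
    finger1_state = jnp.array([-0.4, 0.25, 0.0, 0.0])
    finger2_state = jnp.array([0.4, 0.25, 0.0, 0.0])
    # A batch of candidate commands for finger 1 (x varies, z fixed)
    f1_desired_batch = jnp.stack(
        [jnp.linspace(-0.35, -0.15, batch_size), jnp.full(batch_size, 0.25)], axis=1
    )
    f2_desired = jnp.array([0.2, 0.25])
    step_batched = jax.vmap(
        box_two_finger_step, in_axes=(None, None, 0, None, None, None)
    )
    new_box, new_f1, new_f2 = step_batched(
        box_state, finger1_state, f1_desired_batch, finger2_state, f2_desired, 10.0
    )
    # expect new_box.shape == (batch_size, 6)
    return new_box.shape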
# Sum up the contact forces", "consider negative values phi_finger_box = jnp.minimum(0, phi_finger_box) # Use the same simplified friction", "finger2_state_trace.at[0, :].set(finger2_state_initial) # Simulate for i in range(1, N_steps): # get currents state", "the box finger1_state_initial: initial (x, z, vx, vz) state of the finger finger1_state_desired_trace:", "# Get forces on each body ###################################### finger_forces = jnp.zeros(2) box_forces = jnp.zeros(3)", "box_side_m, mu_d, c, psi_s, contact_k, contact_d ) box_forces += finger2_wrench_on_box finger2_forces += box_force_on_finger2", "left [1, 1], # bottom right ] ) ) # corner point velocities", "jnp.array([tangent_force, normal_force]) # Add the friction force to the box contact_wrench_on_box = contact_wrench_on_box.at[:2].add(contact_force)", "box_state_dot new_finger1_state = finger1_state + dt * finger1_state_dot new_finger2_state = finger2_state + dt", "box_forces.at[1].add(-g * box_mass_kg) # Control forces on finger finger_pos_error = finger_state_desired - finger_state[:2]", "= jnp.maximum(-(p_BF[1] + box_size / 2.0), p_BF[1] - box_size / 2.0) # phi", "mu_d tangent_force = -mu * normal_force contact_force = jnp.array([tangent_force, normal_force]).reshape(2) return contact_force @jax.jit", "box_state[:3], finger_state[:2], box_size ) # Clip to only consider negative values phi_finger_box =", "the finger finger1_state_desired_trace: N_steps x 2 array of desired (x_d, z_d) state of", "jnp.logical_and(right_or_up, left_or_up) normal_left = jnp.logical_and(jnp.logical_not(right_or_up), left_or_up) normal_down = jnp.logical_and( jnp.logical_not(right_or_up), jnp.logical_not(left_or_up) ) normal", "add the torque from this interaction contact_wrench_on_box = contact_wrench_on_box.at[2].add( jnp.cross(p_BC_W[i, :], contact_force) )", "v_BF = R_WB.T @ v_BF_W # Get velocity of contact point in box", "* jnp.array([[0, -1], [1, 0]]) @ p_BF # Get velocity of finger relative", "R_WB.T @ v_BF_W # Get velocity of contact point in box frame v_Bcontact", "finger_state_trace = finger_state_trace.at[i, :].set(next_finger_state) # Return the simulated values return box_state_trace, finger_state_trace @jax.jit", "one finger, using the penalty method for contact modelling with a simplified Coulomb", "finger1_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) box_forces += finger1_wrench_on_box finger1_forces +=", "= jnp.maximum(-(p_BF[0] + box_size / 2.0), p_BF[0] - box_size / 2.0) z_dist =", "p_BF_W # Now get the signed distance x_dist = jnp.maximum(-(p_BF[0] + box_size /", "+ jnp.tile(box_state[3:5], [4, 1]) # Find any that have negative z: min(0, signed", "friction psi_s: tangential velocity where slipping begins contact_k: spring constant of contact contact_d:", "+ finger_control_damping * finger2_vel_error # Contact forces from ground. 
box_forces += calc_box_ground_wrench( box_state,", "-1.0]) # Get the tangent vector, which is orthogonal to the normal vector", "tangential_velocity = ( v_contactF_B - v_contactF_B.dot(normal) * normal ) # relative velocity in", "= finger2_state_trace.at[0, :].set(finger2_state_initial) # Simulate for i in range(1, N_steps): # get currents", "to the normal vector # and points in the same direction as the", "as in # the simplified friction model from eq 21 and 22 in", "# get currents state current_box_state = box_state_trace[i - 1, :] current_finger_state = finger_state_trace[i", "point in box frame v_Bcontact = box_state[5] * jnp.array([[0, -1], [1, 0]]) @", "= jnp.zeros(4) # Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger1_state_dot = finger1_state_dot.at[:2].add(finger1_state[2:]) finger2_state_dot = finger2_state_dot.at[:2].add(finger2_state[2:])", "initial (x, z, vx, vz) state of the finger finger1_state_desired_trace: N_steps x 2", "top right [1, -1], # bottom left [1, 1], # bottom right ]", "normal_velocity * ( phi_corner_ground[i] < 0 ) tangential_velocity = v_WC[i, 0] sticking_mask =", "p_WB R_WB = rotation_matrix(box_state[2]) p_BF = R_WB.T @ p_BF_W # Get velocity of", "-p_BF[0] left_or_up = p_BF[1] > p_BF[0] normal_right = jnp.logical_and(right_or_up, jnp.logical_not(left_or_up)) normal_up = jnp.logical_and(right_or_up,", "with one finger, starting at the given initial states and applying the specified", "signed distance) phi_corner_ground = jnp.minimum(jnp.zeros(4), p_WC[:, 1]) # For each corner, sum up", "of the finger over time finger_control_stiffness: the parameter for the finger stiffness control", "direction normal_force = normal_force - contact_d * normal_velocity * (phi_finger_box < 0) sticking_mask", "the signed distance from the box to the finger args: box_pose: current (x,", "v_WC = (R_WB @ v_BC.T).T + jnp.tile(box_state[3:5], [4, 1]) # Find any that", "theta, vx, vz, omega) state of the box mu_d: coefficient of friction between", "# Contact properties mu_d = 0.7 c = 2.0 psi_s = mu_d /", "finger1_state_dot.at[2:].add(finger1_forces / finger_mass_kg) finger2_state_dot = finger2_state_dot.at[2:].add(finger2_forces / finger_mass_kg) # Torques box_state_dot = box_state_dot.at[5].add(box_forces[2]", "theta, vx, vz, omega) state of the box finger_state_initial: initial (x, z, vx,", "mu_d, c, psi_s, contact_k, contact_d ) box_forces += finger2_wrench_on_box finger2_forces += box_force_on_finger2 ######################################", "contact_d): \"\"\"Compute the contact force between a finger and the ground. 
args: finger_state:", "tangent direction normal_velocity = v_contactF_B.dot(normal) # scalar, along the normal vector tangent =", "* (phi_finger_ground < 0) tangential_velocity = v_WF[0] sticking_mask = jnp.abs(tangential_velocity) <= psi_s slipping_mask", "tangential velocity in determining sticking friction psi_s: tangential velocity where slipping begins contact_k:", "finger_state_dot = finger_state_dot.at[:2].add(finger_state[2:]) # Forces box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg) finger_state_dot = finger_state_dot.at[2:].add(finger_forces", "this interaction contact_wrench_on_box = contact_wrench_on_box.at[2].add( jnp.cross(p_BC_W[i, :], contact_force) ) return contact_wrench_on_box @jax.jit def", "finger1_state_desired, finger2_state, finger2_state_desired, finger_control_stiffness, ): \"\"\"Compute a single discrete-time update for box manipulation", "(x, z, vx, vz) state of the finger finger_state_desired: desired (x_d, z_d) state", "c, psi_s, contact_k, contact_d ) box_forces += finger2_wrench_on_box finger2_forces += box_force_on_finger2 ###################################### #", "friction force to the box contact_wrench_on_box = contact_wrench_on_box.at[:2].add(contact_force) # Also add the torque", "contact forces in the box frame contact_force_B = normal_force * normal + tangent_force", "+= finger_wrench_on_box finger_forces += box_force_on_finger ###################################### # Numerically integrate ###################################### # Build the", "* finger2_pos_error finger2_forces = finger2_forces + finger_control_damping * finger2_vel_error # Contact forces from", "= box_state_trace.at[i, :].set(next_box_state) finger1_state_trace = finger1_state_trace.at[i, :].set(next_finger1_state) finger2_state_trace = finger2_state_trace.at[i, :].set(next_finger2_state) # Return", "simplified Coulomb friction model args: box_state: current (x, z, theta, vx, vz, omega)", "# Also add the torque from this interaction contact_wrench_on_box = contact_wrench_on_box.at[2].add( jnp.cross(p_BC_W[i, :],", "finger_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate the evolution of the box-finger system with one", "mu_d, c, psi_s, contact_k, contact_d ) finger1_forces += calc_finger_ground_force( finger1_state, mu_d, c, psi_s,", "tangent tangent_force = -mu * normal_force # vector! 
# Sum up the contact", "current_finger2_state_desired = finger2_state_desired_trace[i - 1] # get next state next_box_state, next_finger1_state, next_finger2_state =", "= jnp.zeros((N_steps, 6)) finger_state_trace = jnp.zeros((N_steps, 4)) # Store the initial conditions box_state_trace", "0 ) tangential_velocity = v_WC[i, 0] sticking_mask = jnp.abs(tangential_velocity) <= psi_s slipping_mask =", "finger_state, mu_d, c, psi_s, contact_k, contact_d ) # Contact forces between box and", "in box frame v_contactF_B = v_BF - v_Bcontact # Get the normal vector", "contact_d ) box_forces += finger_wrench_on_box finger_forces += box_force_on_finger ###################################### # Numerically integrate ######################################", "jnp.sqrt(box_mass_kg * contact_k) # critical damping # General properties g = 9.81 dt", "of the box box_size: float indicating the side length of the box mu_d:", "c * tangential_velocity + slipping_mask * mu_d * tangent tangent_force = -mu *", "of the finger over time finger2_state_initial: initial (x, z, vx, vz) state of", "jnp.array( [[jnp.cos(theta), -jnp.sin(theta)], [jnp.sin(theta), jnp.cos(theta)]] ) @jax.jit def calc_finger_ground_force(finger_state, mu_d, c, psi_s, contact_k,", "= jnp.zeros(2) box_forces = jnp.zeros(3) # Gravitational force on each body finger1_forces =", ") return phi @jax.jit def rotation_matrix(theta): \"\"\"Return the 2D rotation matrix for angle", "[[jnp.cos(theta), -jnp.sin(theta)], [jnp.sin(theta), jnp.cos(theta)]] ) @jax.jit def calc_finger_ground_force(finger_state, mu_d, c, psi_s, contact_k, contact_d):", "phi = jnp.minimum(0.0, jnp.maximum(x_dist, z_dist)) phi = phi + jnp.linalg.norm( jnp.maximum(jnp.array([1e-3, 1e-3]), jnp.array([x_dist,", "= finger1_state_dot.at[2:].add(finger1_forces / finger_mass_kg) finger2_state_dot = finger2_state_dot.at[2:].add(finger2_forces / finger_mass_kg) # Torques box_state_dot =", "+= normal_down * jnp.array([0.0, -1.0]) # Get the tangent vector, which is orthogonal", "* phi_finger_ground normal_force = normal_force - contact_d * normal_velocity * (phi_finger_ground < 0)", "box_state_trace = box_state_trace.at[i, :].set(next_box_state) finger1_state_trace = finger1_state_trace.at[i, :].set(next_finger1_state) finger2_state_trace = finger2_state_trace.at[i, :].set(next_finger2_state) #", "the forces and torques due to penetration with the ground contact_wrench_on_box = jnp.zeros(3)", "force on finger in x and z. \"\"\" # Contact point is just", "normal_force = -contact_k * phi_corner_ground[i] normal_force = normal_force - contact_d * normal_velocity *", "the contact force between a finger and the ground. 
args: finger_state: current (x,", "Approximate ground force as a damped spring, as in # the simplified friction", "box_forces.at[1].add(-g * box_mass_kg) # Control forces on fingers finger1_pos_error = finger1_state_desired - finger1_state[:2]", "1 / 6 * box_mass_kg * box_side_m ** 2 # Finger properties finger_mass_kg", "contact_force @jax.jit def calc_box_ground_wrench(box_state, box_size, mu_d, c, psi_s, contact_k, contact_d): \"\"\"Compute the contact", "phi_corner_ground = jnp.minimum(jnp.zeros(4), p_WC[:, 1]) # For each corner, sum up the forces", "contact_k, contact_d ) finger2_forces += calc_finger_ground_force( finger2_state, mu_d, c, psi_s, contact_k, contact_d )", "finger2_forces = finger2_forces.at[1].add(-g * finger_mass_kg) box_forces = box_forces.at[1].add(-g * box_mass_kg) # Control forces", "+ box_size / 2.0), p_BF[0] - box_size / 2.0) z_dist = jnp.maximum(-(p_BF[1] +", "# seconds per step ###################################### # Get forces on each body ###################################### finger1_forces", "# Get the position and velocity of the finger in the world frame", "finger_pose, box_size): \"\"\"Compute the signed distance from the box to the finger args:", "control returns: new_box_state, new_finger_state \"\"\" ###################################### # define parameters of the simulation ######################################", "box_size: float indicating the side length of the box mu_d: coefficient of friction", "stiffness control N_steps: int specifying the number of discrete time steps to simulate", "current (x, z, theta) state of the box finger_pose: current (x, z) state", "frame # Transform to world frame v_WC = (R_WB @ v_BC.T).T + jnp.tile(box_state[3:5],", "jnp.array([0.0, -1.0]) # Get the tangent vector, which is orthogonal to the normal", "vx, vz) state of the second finger finger2_state_desired: desired (x_d, z_d) state of", "box_inertia = 1 / 6 * box_mass_kg * box_side_m ** 2 # Finger", "in x, z, and theta. - contact force on finger in x and", "state of the box finger1_state: current (x, z, vx, vz) state of the", "= normal_force - contact_d * normal_velocity * (phi_finger_ground < 0) tangential_velocity = v_WF[0]", "finger box_wrench = jnp.zeros(3) box_wrench = box_wrench.at[:2].add(-contact_force_W) box_wrench = box_wrench.at[2].add(jnp.cross(p_BF_W, -contact_force_W)) finger_forces =", "/ box_mass_kg) finger1_state_dot = finger1_state_dot.at[2:].add(finger1_forces / finger_mass_kg) finger2_state_dot = finger2_state_dot.at[2:].add(finger2_forces / finger_mass_kg) #", "normal_force - contact_d * normal_velocity * (phi_finger_ground < 0) tangential_velocity = v_WF[0] sticking_mask", "-mu * normal_force contact_force = jnp.array([tangent_force, normal_force]) # Add the friction force to", "omega) state of the box box_size: float indicating the side length of the", ":].set(finger2_state_initial) # Simulate for i in range(1, N_steps): # get currents state current_box_state", "def box_two_finger_step( box_state, finger1_state, finger1_state_desired, finger2_state, finger2_state_desired, finger_control_stiffness, ): \"\"\"Compute a single discrete-time", "frame p_WF = finger_pose p_WB = box_pose[:2] theta_B = box_pose[2] p_BF_W = p_WF", "ground force as a damped spring, as in # the simplified friction model", "box finger_state: current (x, z, vx, vz) state of the finger finger_state_desired: desired", "-mu * normal_force # vector! 
# Sum up the contact forces in the", "(x, z, theta) state of the box finger_pose: current (x, z) state of", "= normal_force - contact_d * normal_velocity * ( phi_corner_ground[i] < 0 ) tangential_velocity", "finger_state[:2] v_WF = finger_state[2:] # Get penetration into ground phi_finger_ground = jnp.minimum(jnp.zeros(1), p_WF[1])", "phi_finger_ground normal_force = normal_force - contact_d * normal_velocity * (phi_finger_ground < 0) tangential_velocity", "normal_force = -contact_k * phi_finger_ground normal_force = normal_force - contact_d * normal_velocity *", "box_state, finger_state, box_size, mu_d, c, psi_s, contact_k, contact_d ): \"\"\"Compute the contact wrench", "forces and torques due to penetration with the ground contact_wrench_on_box = jnp.zeros(3) for", "= jnp.logical_and(right_or_up, jnp.logical_not(left_or_up)) normal_up = jnp.logical_and(right_or_up, left_or_up) normal_left = jnp.logical_and(jnp.logical_not(right_or_up), left_or_up) normal_down =", "normal_left * jnp.array([-1.0, 0.0]) normal += normal_up * jnp.array([0.0, 1.0]) normal += normal_down", "# Add the contact force to the box and finger box_wrench = jnp.zeros(3)", "finger1_state_trace = finger1_state_trace.at[0, :].set(finger1_state_initial) finger2_state_trace = finger2_state_trace.at[0, :].set(finger2_state_initial) # Simulate for i in", "= calc_box_finger_wrench( box_state, finger2_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) box_forces +=", "discrete time steps to simulate returns: box_state_trace, finger_state_trace \"\"\" # Create arrays to", "position and velocity of the finger in the world frame p_WF = finger_state[:2]", "- finger2_state[:2] finger2_vel_error = -finger2_state[2:] finger2_forces = finger2_forces + finger_control_stiffness * finger2_pos_error finger2_forces", "any that have negative z: min(0, signed distance) phi_corner_ground = jnp.minimum(jnp.zeros(4), p_WC[:, 1])", "box_state, finger1_state, finger1_state_desired, finger2_state, finger2_state_desired, finger_control_stiffness, ): \"\"\"Compute a single discrete-time update for", "right_or_up = p_BF[1] > -p_BF[0] left_or_up = p_BF[1] > p_BF[0] normal_right = jnp.logical_and(right_or_up,", "* ( phi_corner_ground[i] < 0 ) tangential_velocity = v_WC[i, 0] sticking_mask = jnp.abs(tangential_velocity)", "# General properties g = 9.81 dt = 0.001 # seconds per step", "-contact_k * phi_finger_ground normal_force = normal_force - contact_d * normal_velocity * (phi_finger_ground <", "parameters of the simulation ###################################### # Box properties box_mass_kg = 1.0 box_side_m =", "Contact forces from the ground. box_forces += calc_box_ground_wrench( box_state, box_side_m, mu_d, c, psi_s,", "vz, omega) state of the box finger1_state_initial: initial (x, z, vx, vz) state", "the simulated values return box_state_trace, finger_state_trace @jax.jit def box_two_finger_step( box_state, finger1_state, finger1_state_desired, finger2_state,", "p_WF[1]) # Get the contact forces. Approximate ground force as a damped spring,", "evolution of the box-finger system with one finger, starting at the given initial", "z, theta, vx, vz, omega) state of the box finger_state_initial: initial (x, z,", "in x, z, and theta. 
\"\"\" # Start by finding any box corner", "# Use the same simplified friction model as used for ground contact normal_force", "box_state, finger_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) box_forces += finger_wrench_on_box finger_forces", "# Start by finding any box corner points that intersect the ground at", "-finger2_state[2:] finger2_forces = finger2_forces + finger_control_stiffness * finger2_pos_error finger2_forces = finger2_forces + finger_control_damping", "Add the friction force to the box contact_wrench_on_box = contact_wrench_on_box.at[:2].add(contact_force) # Also add", "the box contact_wrench_on_box = contact_wrench_on_box.at[:2].add(contact_force) # Also add the torque from this interaction", "bottom left [1, 1], # bottom right ] ) ) # corner point", "(x_d, z_d) state of the finger finger_control_stiffness: the parameter for the finger stiffness", "bottom right ] ) ) # corner point velocities in box frame #", "import jax.numpy as jnp import jax @jax.jit def box_finger_signed_distance(box_pose, finger_pose, box_size): \"\"\"Compute the", "fingers finger1_pos_error = finger1_state_desired - finger1_state[:2] finger1_vel_error = -finger1_state[2:] finger1_forces = finger1_forces +", "box to the finger args: box_pose: current (x, z, theta) state of the", "= finger2_forces + finger_control_damping * finger2_vel_error # Contact forces from ground. box_forces +=", "contact returns: contact force in x and z \"\"\" # Get the position", "box_state_dot.at[5].add(box_forces[2] / box_inertia) # Itegrate new_box_state = box_state + dt * box_state_dot new_finger_state", "intersect the ground at z = 0 half_size = box_size / 2.0 p_BC", "phi_finger_box) # Use the same simplified friction model as used for ground contact", "velocity of contact point in box frame v_Bcontact = box_state[5] * jnp.array([[0, -1],", "-contact_k * phi_corner_ground[i] normal_force = normal_force - contact_d * normal_velocity * ( phi_corner_ground[i]", "box_wrench = jnp.zeros(3) box_wrench = box_wrench.at[:2].add(-contact_force_W) box_wrench = box_wrench.at[2].add(jnp.cross(p_BF_W, -contact_force_W)) finger_forces = contact_force_W", "the world frame p_WF = finger_state[:2] v_WF = finger_state[2:] # Get penetration into", "finger2_state_trace = jnp.zeros((N_steps, 4)) # Store the initial conditions box_state_trace = box_state_trace.at[0, :].set(box_state_initial)", "* contact_k) # critical damping # General properties g = 9.81 dt =", "theta_B = box_pose[2] p_BF_W = p_WF - p_WB # Rotate p_BF_W by -theta", "c, psi_s, contact_k, contact_d): \"\"\"Compute the contact force between a finger and the", "ground. box_forces += calc_box_ground_wrench( box_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) finger_forces", "jnp.sqrt(2) * half_size v_BC = ( box_state[5] * r * jnp.array( [ [-1,", "jnp.cross(p_BC_W[i, :], contact_force) ) return contact_wrench_on_box @jax.jit def calc_box_finger_wrench( box_state, finger_state, box_size, mu_d,", "contact_d = 2 * jnp.sqrt(box_mass_kg * contact_k) # critical damping # General properties", "22 in # https://arxiv.org/pdf/2109.05143.pdf, but with damping. 
normal_velocity = v_WF[1] normal_force = -contact_k", "finger_forces += calc_finger_ground_force( finger_state, mu_d, c, psi_s, contact_k, contact_d ) # Contact forces", "= finger_state[:2] p_WB = box_state[:2] p_BF_W = p_WF - p_WB R_WB = rotation_matrix(box_state[2])", "the finger box_size: side length of box returns: float signed distance \"\"\" #", "- contact_d * normal_velocity * (phi_finger_box < 0) sticking_mask = jnp.linalg.norm(tangential_velocity + 1e-3)", "to only consider negative values phi_finger_box = jnp.minimum(0, phi_finger_box) # Use the same", "sum up the forces and torques due to penetration with the ground contact_wrench_on_box", "box_force_on_finger = calc_box_finger_wrench( box_state, finger_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) box_forces", "normal_down = jnp.logical_and( jnp.logical_not(right_or_up), jnp.logical_not(left_or_up) ) normal = normal_right * jnp.array([1.0, 0.0]) normal", "finger_state_trace = finger_state_trace.at[0, :].set(finger_state_initial) # Simulate for i in range(1, N_steps): # get", "psi_s, contact_k, contact_d ) finger1_forces += calc_finger_ground_force( finger1_state, mu_d, c, psi_s, contact_k, contact_d", "of the finger finger1_state_desired_trace: N_steps x 2 array of desired (x_d, z_d) state", "contact wrench on box in x, z, and theta. - contact force on", "finger in x and z. \"\"\" # Contact point is just the finger", "box frame v_contactF_B = v_BF - v_Bcontact # Get the normal vector of", "def box_single_finger_step( box_state, finger_state, finger_state_desired, finger_control_stiffness, ): \"\"\"Compute a single discrete-time update for", "normal_velocity * (phi_finger_ground < 0) tangential_velocity = v_WF[0] sticking_mask = jnp.abs(tangential_velocity) <= psi_s", "jnp.zeros(3) # Gravitational force on each body finger1_forces = finger1_forces.at[1].add(-g * finger_mass_kg) finger2_forces", "+ 1e-3) <= psi_s slipping_mask = jnp.logical_not(sticking_mask) mu = sticking_mask * c *", "using the penalty method for contact modelling with a simplified Coulomb friction model", "contact_k, contact_d ) box_forces += finger2_wrench_on_box finger2_forces += box_force_on_finger2 ###################################### # Numerically integrate", "= jnp.zeros((N_steps, 4)) # Store the initial conditions box_state_trace = box_state_trace.at[0, :].set(box_state_initial) finger1_state_trace", "finger1_wrench_on_box, box_force_on_finger1 = calc_box_finger_wrench( box_state, finger1_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d )", "finger_state_desired: desired (x_d, z_d) state of the finger finger_control_stiffness: the parameter for the", "finger1_forces = finger1_forces.at[1].add(-g * finger_mass_kg) finger2_forces = finger2_forces.at[1].add(-g * finger_mass_kg) box_forces = box_forces.at[1].add(-g", "and ground while slipping c: coefficient of tangential velocity in determining sticking friction", "is just the finger point in the box frame p_WF = finger_state[:2] p_WB", "mu_d, c, psi_s, contact_k, contact_d ): \"\"\"Compute the contact wrench between the box", ") normal = normal_right * jnp.array([1.0, 0.0]) normal += normal_left * jnp.array([-1.0, 0.0])", "finger1_state_desired_trace[i - 1] current_finger2_state = finger2_state_trace[i - 1, :] current_finger2_state_desired = finger2_state_desired_trace[i -", "Finger properties finger_mass_kg = 0.1 finger_control_damping = 2 # Contact properties mu_d =", "contact_wrench_on_box = contact_wrench_on_box.at[:2].add(contact_force) # Also add the torque from this interaction 
contact_wrench_on_box =", "= v_WC[i, 1] normal_force = -contact_k * phi_corner_ground[i] normal_force = normal_force - contact_d", "the z axis to get position in box frame R_WB = jnp.array( [[jnp.cos(theta_B),", "jnp.zeros((N_steps, 4)) # Store the initial conditions box_state_trace = box_state_trace.at[0, :].set(box_state_initial) finger_state_trace =", "def box_two_finger_simulate( box_state_initial, finger1_state_initial, finger1_state_desired_trace, finger2_state_initial, finger2_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate the evolution", "to penetration with the ground contact_wrench_on_box = jnp.zeros(3) for i in range(4): #", "= jnp.zeros(3) box_wrench = box_wrench.at[:2].add(-contact_force_W) box_wrench = box_wrench.at[2].add(jnp.cross(p_BF_W, -contact_force_W)) finger_forces = contact_force_W return", "force. Approximate ground force as a damped spring, as in # the simplified", "* finger_pos_error finger_forces = finger_forces + finger_control_damping * finger_vel_error # Contact forces from", "= v_contactF_B.dot(normal) # scalar, along the normal vector tangent = tangential_velocity / (jnp.linalg.norm(tangential_velocity", "theta\"\"\" return jnp.array( [[jnp.cos(theta), -jnp.sin(theta)], [jnp.sin(theta), jnp.cos(theta)]] ) @jax.jit def calc_finger_ground_force(finger_state, mu_d, c,", "on fingers finger1_pos_error = finger1_state_desired - finger1_state[:2] finger1_vel_error = -finger1_state[2:] finger1_forces = finger1_forces", "arrays to store simulation traces box_state_trace = jnp.zeros((N_steps, 6)) finger1_state_trace = jnp.zeros((N_steps, 4))", "box_pose: current (x, z, theta) state of the box finger_pose: current (x, z)", "the normal vector # and points in the same direction as the relative", "distance phi_finger_box = box_finger_signed_distance( box_state[:3], finger_state[:2], box_size ) # Clip to only consider", "and finger box_wrench = jnp.zeros(3) box_wrench = box_wrench.at[:2].add(-contact_force_W) box_wrench = box_wrench.at[2].add(jnp.cross(p_BF_W, -contact_force_W)) finger_forces", "current (x, z, theta, vx, vz, omega) state of the box box_size: float", "+= box_force_on_finger2 ###################################### # Numerically integrate ###################################### # Build the derivatives matrix box_state_dot", "contact wrench in x, z, and theta. 
\"\"\" # Start by finding any", "values return box_state_trace, finger_state_trace @jax.jit def box_two_finger_step( box_state, finger1_state, finger1_state_desired, finger2_state, finger2_state_desired, finger_control_stiffness,", "finger box_size: side length of box returns: float signed distance \"\"\" # Credit", "box_inertia) # Itegrate new_box_state = box_state + dt * box_state_dot new_finger_state = finger_state", "z axis to get position in box frame R_WB = jnp.array( [[jnp.cos(theta_B), -jnp.sin(theta_B)],", ":].set(box_state_initial) finger1_state_trace = finger1_state_trace.at[0, :].set(finger1_state_initial) finger2_state_trace = finger2_state_trace.at[0, :].set(finger2_state_initial) # Simulate for i", "(x, z, theta, vx, vz, omega) state of the box finger_state_initial: initial (x,", "box_force_on_finger1 finger2_wrench_on_box, box_force_on_finger2 = calc_box_finger_wrench( box_state, finger2_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d", "dt * box_state_dot new_finger_state = finger_state + dt * finger_state_dot return new_box_state, new_finger_state", "finger2_state_trace = finger2_state_trace.at[0, :].set(finger2_state_initial) # Simulate for i in range(1, N_steps): # get", "but with damping. normal_velocity = v_WF[1] normal_force = -contact_k * phi_finger_ground normal_force =", "1e-3) + 1e-3) # Get signed distance phi_finger_box = box_finger_signed_distance( box_state[:3], finger_state[:2], box_size", "finger1_pos_error finger1_forces = finger1_forces + finger_control_damping * finger1_vel_error finger2_pos_error = finger2_state_desired - finger2_state[:2]", "= jnp.array( [ [-half_size, half_size], # top left [half_size, half_size], # top right", "psi_s, contact_k, contact_d ) # Contact forces between box and finger finger_wrench_on_box, box_force_on_finger", "finger1_vel_error finger2_pos_error = finger2_state_desired - finger2_state[:2] finger2_vel_error = -finger2_state[2:] finger2_forces = finger2_forces +", "relative to contact pt in box frame v_contactF_B = v_BF - v_Bcontact #", "slipping_mask * mu_d * tangent tangent_force = -mu * normal_force # vector! #", "calc_finger_ground_force( finger_state, mu_d, c, psi_s, contact_k, contact_d ) # Contact forces between box", "box_state_dot.at[5].add(box_forces[2] / box_inertia) # Itegrate new_box_state = box_state + dt * box_state_dot new_finger1_state", "phi_finger_ground = jnp.minimum(jnp.zeros(1), p_WF[1]) # Get the contact forces. 
Approximate ground force as", "box_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) finger1_forces += calc_finger_ground_force( finger1_state, mu_d,", "just the finger point in the box frame p_WF = finger_state[:2] p_WB =", "= box_pose[:2] theta_B = box_pose[2] p_BF_W = p_WF - p_WB # Rotate p_BF_W", "finger_state_trace.at[i, :].set(next_finger_state) # Return the simulated values return box_state_trace, finger_state_trace @jax.jit def box_two_finger_step(", "import jax @jax.jit def box_finger_signed_distance(box_pose, finger_pose, box_size): \"\"\"Compute the signed distance from the", "c, psi_s, contact_k, contact_d ): \"\"\"Compute the contact wrench between the box and", "theta, vx, vz, omega) state of the box box_size: float indicating the side", "# Build the derivatives matrix box_state_dot = jnp.zeros(6) finger_state_dot = jnp.zeros(4) # Velocities", "= -finger2_state[2:] finger2_forces = finger2_forces + finger_control_stiffness * finger2_pos_error finger2_forces = finger2_forces +", "+= finger1_wrench_on_box finger1_forces += box_force_on_finger1 finger2_wrench_on_box, box_force_on_finger2 = calc_box_finger_wrench( box_state, finger2_state, box_side_m, mu_d,", ":].set(box_state_initial) finger_state_trace = finger_state_trace.at[0, :].set(finger_state_initial) # Simulate for i in range(1, N_steps): #", "left [half_size, half_size], # top right [-half_size, -half_size], # bottom left [half_size, -half_size],", "[-half_size, half_size], # top left [half_size, half_size], # top right [-half_size, -half_size], #", "2.0), p_BF[0] - box_size / 2.0) z_dist = jnp.maximum(-(p_BF[1] + box_size / 2.0),", ") @jax.jit def calc_finger_ground_force(finger_state, mu_d, c, psi_s, contact_k, contact_d): \"\"\"Compute the contact force", "ground. box_forces += calc_box_ground_wrench( box_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) finger1_forces", "# the simplified friction model from eq 21 and 22 in # https://arxiv.org/pdf/2109.05143.pdf,", "= mu_d / c contact_k = 1000 contact_d = 2 * jnp.sqrt(box_mass_kg *", "finger_control_stiffness * finger2_pos_error finger2_forces = finger2_forces + finger_control_damping * finger2_vel_error # Contact forces", "finger_state, box_size, mu_d, c, psi_s, contact_k, contact_d ): \"\"\"Compute the contact wrench between", "rotation_matrix(box_state[2]) p_BF = R_WB.T @ p_BF_W # Get velocity of the finger in", "contact_d ): \"\"\"Compute the contact wrench between the box and the ground. 
args:", "stackoverflow.com/questions/30545052/calculate-signed-distance-between-point-and- # rectangle # First transform the finger (x, z) into the box", "vx, vz, omega) state of the box finger1_state: current (x, z, vx, vz)", "the initial conditions box_state_trace = box_state_trace.at[0, :].set(box_state_initial) finger1_state_trace = finger1_state_trace.at[0, :].set(finger1_state_initial) finger2_state_trace =", "contact_force) ) return contact_wrench_on_box @jax.jit def calc_box_finger_wrench( box_state, finger_state, box_size, mu_d, c, psi_s,", "args: box_pose: current (x, z, theta) state of the box finger_pose: current (x,", "each body ###################################### finger1_forces = jnp.zeros(2) finger2_forces = jnp.zeros(2) box_forces = jnp.zeros(3) #", "(x_d, z_d) state of the first finger finger2_state: current (x, z, vx, vz)", "psi_s, contact_k, contact_d ) box_forces += finger2_wrench_on_box finger2_forces += box_force_on_finger2 ###################################### # Numerically", "calc_box_ground_wrench(box_state, box_size, mu_d, c, psi_s, contact_k, contact_d): \"\"\"Compute the contact wrench between the", "but with damping. normal_velocity = v_WC[i, 1] normal_force = -contact_k * phi_corner_ground[i] normal_force", "= finger2_state_desired - finger2_state[:2] finger2_vel_error = -finger2_state[2:] finger2_forces = finger2_forces + finger_control_stiffness *", "finger2_state_desired - finger2_state[:2] finger2_vel_error = -finger2_state[2:] finger2_forces = finger2_forces + finger_control_stiffness * finger2_pos_error", "only consider negative values phi_finger_box = jnp.minimum(0, phi_finger_box) # Use the same simplified", "the box finger_pose: current (x, z) state of the finger box_size: side length", "ground at z = 0 half_size = box_size / 2.0 p_BC = jnp.array(", "first finger finger1_state_desired: desired (x_d, z_d) state of the first finger finger2_state: current", "# bottom right ] ) # corner points in box frame # Transform", "box_state_trace = box_state_trace.at[0, :].set(box_state_initial) finger1_state_trace = finger1_state_trace.at[0, :].set(finger1_state_initial) finger2_state_trace = finger2_state_trace.at[0, :].set(finger2_state_initial) #", "simulate returns: box_state_trace, finger_state_trace \"\"\" # Create arrays to store simulation traces box_state_trace", "indicating the side length of the box mu_d: coefficient of friction between box", "General properties g = 9.81 dt = 0.001 # seconds per step ######################################", "new_box_state, new_finger_state def box_single_finger_simulate( box_state_initial, finger_state_initial, finger_state_desired_trace, finger_control_stiffness, N_steps, ): \"\"\"Simulate the evolution", "for box manipulation with one finger, using the penalty method for contact modelling", "vx, vz) state of the finger finger_state_desired_trace: N_steps x 2 array of desired", "tangential_velocity + slipping_mask * mu_d tangent_force = -mu * normal_force contact_force = jnp.array([tangent_force,", "# https://arxiv.org/pdf/2109.05143.pdf, but with damping. normal_velocity = v_WF[1] normal_force = -contact_k * phi_finger_ground", "finger2_forces = finger2_forces + finger_control_damping * finger2_vel_error # Contact forces from ground. 
box_forces", "the normal vector tangent = tangential_velocity / (jnp.linalg.norm(tangential_velocity + 1e-3) + 1e-3) #", "* normal_velocity * (phi_finger_ground < 0) tangential_velocity = v_WF[0] sticking_mask = jnp.abs(tangential_velocity) <=", "the finger over time finger_control_stiffness: the parameter for the finger stiffness control N_steps:", "phi_finger_box # scalar, in normal direction normal_force = normal_force - contact_d * normal_velocity", "while slipping c: coefficient of tangential velocity in determining sticking friction psi_s: tangential", "vx, vz) state of the finger finger_state_desired: desired (x_d, z_d) state of the", "finger_vel_error # Contact forces from the ground. box_forces += calc_box_ground_wrench( box_state, box_side_m, mu_d,", "= -contact_k * phi_corner_ground[i] normal_force = normal_force - contact_d * normal_velocity * (", "state next_box_state, next_finger1_state, next_finger2_state = box_two_finger_step( current_box_state, current_finger1_state, current_finger1_state_desired, current_finger2_state, current_finger2_state_desired, finger_control_stiffness, )", "= signed distance. phi = jnp.minimum(0.0, jnp.maximum(x_dist, z_dist)) phi = phi + jnp.linalg.norm(", "p_BF[0] - box_size / 2.0) z_dist = jnp.maximum(-(p_BF[1] + box_size / 2.0), p_BF[1]", "control inputs args: box_state_initial: initial (x, z, theta, vx, vz, omega) state of", "= jnp.zeros(6) finger1_state_dot = jnp.zeros(4) finger2_state_dot = jnp.zeros(4) # Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:])", "along the normal vector tangent = tangential_velocity / (jnp.linalg.norm(tangential_velocity + 1e-3) + 1e-3)", "= R_WB.T @ v_BF_W # Get velocity of contact point in box frame", "next state next_box_state, next_finger_state = box_single_finger_step( current_box_state, current_finger_state, current_finger_state_desired, finger_control_stiffness, ) # Save", "jnp.linalg.norm(tangential_velocity + 1e-3) <= psi_s slipping_mask = jnp.logical_not(sticking_mask) mu = sticking_mask * c", "jnp.logical_not(sticking_mask) mu = sticking_mask * c * tangential_velocity + slipping_mask * mu_d *", "# bottom right ] ) ) # corner point velocities in box frame", "= box_state_trace.at[i, :].set(next_box_state) finger_state_trace = finger_state_trace.at[i, :].set(next_finger_state) # Return the simulated values return", "signed distance. phi = jnp.minimum(0.0, jnp.maximum(x_dist, z_dist)) phi = phi + jnp.linalg.norm( jnp.maximum(jnp.array([1e-3,", "calc_box_ground_wrench( box_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) finger1_forces += calc_finger_ground_force( finger1_state,", "and 22 in # https://arxiv.org/pdf/2109.05143.pdf, but with damping. normal_velocity = v_WC[i, 1] normal_force", "+ tangent_force # transform into the world frame contact_force_W = R_WB @ contact_force_B", "friction model as used for ground contact normal_force = -contact_k * phi_finger_box #", "length of box returns: float signed distance \"\"\" # Credit to this stackoverflow", "side length of the box mu_d: coefficient of friction between box and ground", "the normal vector of the contact in the box frame right_or_up = p_BF[1]", "current (x, z, vx, vz) state of the finger box_size: float indicating the", "+ finger_control_damping * finger_vel_error # Contact forces from the ground. box_forces += calc_box_ground_wrench(", "box in x, z, and theta. 
- contact force on finger in x", "R_WB @ contact_force_B # Add the contact force to the box and finger", "finger2_forces = jnp.zeros(2) box_forces = jnp.zeros(3) # Gravitational force on each body finger1_forces", "finger finger1_state_desired_trace: N_steps x 2 array of desired (x_d, z_d) state of the", "= jnp.zeros(3) # Gravitational force on each body finger_forces = finger_forces.at[1].add(-g * finger_mass_kg)", "# Transform into world frame R_WB = rotation_matrix(box_state[2]) p_BC_W = (R_WB @ p_BC.T).T", "* box_mass_kg * box_side_m ** 2 # Finger properties finger_mass_kg = 0.1 finger_control_damping", "box_state_trace = box_state_trace.at[0, :].set(box_state_initial) finger_state_trace = finger_state_trace.at[0, :].set(finger_state_initial) # Simulate for i in", "z) state of the finger box_size: side length of box returns: float signed", "normal_up = jnp.logical_and(right_or_up, left_or_up) normal_left = jnp.logical_and(jnp.logical_not(right_or_up), left_or_up) normal_down = jnp.logical_and( jnp.logical_not(right_or_up), jnp.logical_not(left_or_up)", "* tangent tangent_force = -mu * normal_force # vector! # Sum up the", "the ground. box_forces += calc_box_ground_wrench( box_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d )", "per step ###################################### # Get forces on each body ###################################### finger1_forces = jnp.zeros(2)", "velocity of the finger in the world frame p_WF = finger_state[:2] v_WF =", "model args: box_state: current (x, z, theta, vx, vz, omega) state of the", "2 * jnp.sqrt(box_mass_kg * contact_k) # critical damping # General properties g =", "# Also find the velocities of each corner point r = jnp.sqrt(2) *", "Create arrays to store simulation traces box_state_trace = jnp.zeros((N_steps, 6)) finger1_state_trace = jnp.zeros((N_steps,", "> -p_BF[0] left_or_up = p_BF[1] > p_BF[0] normal_right = jnp.logical_and(right_or_up, jnp.logical_not(left_or_up)) normal_up =", "Build the derivatives matrix box_state_dot = jnp.zeros(6) finger1_state_dot = jnp.zeros(4) finger2_state_dot = jnp.zeros(4)", "finger2_state_initial: initial (x, z, vx, vz) state of the finger finger2_state_desired_trace: N_steps x", "= sticking_mask * c * tangential_velocity + slipping_mask * mu_d * tangent tangent_force", "* (phi_finger_box < 0) sticking_mask = jnp.linalg.norm(tangential_velocity + 1e-3) <= psi_s slipping_mask =", "box mu_d: coefficient of friction between box and ground while slipping c: coefficient", "[ [-1, -1], # top left [-1, 1], # top right [1, -1],", "contact_d ) # Contact forces between box and fingers finger1_wrench_on_box, box_force_on_finger1 = calc_box_finger_wrench(", "finger1_state_dot = finger1_state_dot.at[2:].add(finger1_forces / finger_mass_kg) finger2_state_dot = finger2_state_dot.at[2:].add(finger2_forces / finger_mass_kg) # Torques box_state_dot", "\"\"\" # Get the position and velocity of the finger in the world", "phi + jnp.linalg.norm( jnp.maximum(jnp.array([1e-3, 1e-3]), jnp.array([x_dist, z_dist])) ) return phi @jax.jit def rotation_matrix(theta):", "jnp.zeros(4) # Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger_state_dot = finger_state_dot.at[:2].add(finger_state[2:]) # Forces box_state_dot =", "(R_WB @ p_BC.T).T p_WC = p_BC_W + jnp.tile(box_state[:2], [4, 1]) # Also find", "@ v_BC.T).T + jnp.tile(box_state[3:5], [4, 1]) # Find any that have negative z:", "box_size): \"\"\"Compute the signed distance from the box to the finger args: box_pose:", "contact_d: damping constant of contact 
returns: contact wrench in x, z, and theta.", "between the box and the ground. args: box_state: current (x, z, theta, vx,", "forces on finger finger_pos_error = finger_state_desired - finger_state[:2] finger_vel_error = -finger_state[2:] finger_forces =", "4)) # Store the initial conditions box_state_trace = box_state_trace.at[0, :].set(box_state_initial) finger_state_trace = finger_state_trace.at[0,", "normal = normal_right * jnp.array([1.0, 0.0]) normal += normal_left * jnp.array([-1.0, 0.0]) normal", "forces between box and finger finger_wrench_on_box, box_force_on_finger = calc_box_finger_wrench( box_state, finger_state, box_side_m, mu_d,", "jnp.zeros((N_steps, 6)) finger1_state_trace = jnp.zeros((N_steps, 4)) finger2_state_trace = jnp.zeros((N_steps, 4)) # Store the", "by finding any box corner points that intersect the ground at z =", "tangent_force = -mu * normal_force contact_force = jnp.array([tangent_force, normal_force]).reshape(2) return contact_force @jax.jit def", "= rotation_matrix(box_state[2]) p_BC_W = (R_WB @ p_BC.T).T p_WC = p_BC_W + jnp.tile(box_state[:2], [4,", "R_BW @ p_BF_W # Now get the signed distance x_dist = jnp.maximum(-(p_BF[0] +", "box_forces += finger1_wrench_on_box finger1_forces += box_force_on_finger1 finger2_wrench_on_box, box_force_on_finger2 = calc_box_finger_wrench( box_state, finger2_state, box_side_m,", "* jnp.array( [ [-1, -1], # top left [-1, 1], # top right", "to the finger args: box_pose: current (x, z, theta) state of the box", "of the finger box_size: side length of box returns: float signed distance \"\"\"", "the ground at z = 0 half_size = box_size / 2.0 p_BC =", "finger_mass_kg) box_forces = box_forces.at[1].add(-g * box_mass_kg) # Control forces on finger finger_pos_error =", "finger stiffness control N_steps: int specifying the number of discrete time steps to", "box_state_trace.at[0, :].set(box_state_initial) finger1_state_trace = finger1_state_trace.at[0, :].set(finger1_state_initial) finger2_state_trace = finger2_state_trace.at[0, :].set(finger2_state_initial) # Simulate for", "jnp.zeros(6) finger1_state_dot = jnp.zeros(4) finger2_state_dot = jnp.zeros(4) # Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger1_state_dot", "= finger2_state + dt * finger2_state_dot return new_box_state, new_finger1_state, new_finger2_state def box_two_finger_simulate( box_state_initial,", "p_WC = p_BC_W + jnp.tile(box_state[:2], [4, 1]) # Also find the velocities of", "# Get penetration into ground phi_finger_ground = jnp.minimum(jnp.zeros(1), p_WF[1]) # Get the contact", "vx, vz) state of the finger finger1_state_desired_trace: N_steps x 2 array of desired", "returns: Tuple of - contact wrench on box in x, z, and theta.", "array of desired (x_d, z_d) state of the finger over time finger_control_stiffness: the", "Coulomb friction model args: box_state: current (x, z, theta, vx, vz, omega) state", "finger2_forces.at[1].add(-g * finger_mass_kg) box_forces = box_forces.at[1].add(-g * box_mass_kg) # Control forces on fingers", "of - contact wrench on box in x, z, and theta. 
- contact", "+ dt * finger1_state_dot new_finger2_state = finger2_state + dt * finger2_state_dot return new_box_state,", "returns: box_state_trace, finger_state_trace \"\"\" # Create arrays to store simulation traces box_state_trace =", "box box_size: float indicating the side length of the box mu_d: coefficient of", "return box_state_trace, finger_state_trace @jax.jit def box_two_finger_step( box_state, finger1_state, finger1_state_desired, finger2_state, finger2_state_desired, finger_control_stiffness, ):", "omega) state of the box finger_state_initial: initial (x, z, vx, vz) state of", "= v_WF[1] normal_force = -contact_k * phi_finger_ground normal_force = normal_force - contact_d *", "p_WF = finger_pose p_WB = box_pose[:2] theta_B = box_pose[2] p_BF_W = p_WF -", "psi_s, contact_k, contact_d ): \"\"\"Compute the contact wrench between the box and the", "box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger1_state_dot = finger1_state_dot.at[:2].add(finger1_state[2:]) finger2_state_dot = finger2_state_dot.at[:2].add(finger2_state[2:]) # Forces box_state_dot =", "given initial states and applying the specified control inputs args: box_state_initial: initial (x,", "vx, vz) state of the finger finger2_state_desired_trace: N_steps x 2 array of desired", "in the box frame contact_force_B = normal_force * normal + tangent_force # transform", "in # https://arxiv.org/pdf/2109.05143.pdf, but with damping. normal_velocity = v_WF[1] normal_force = -contact_k *", "Build the derivatives matrix box_state_dot = jnp.zeros(6) finger_state_dot = jnp.zeros(4) # Velocities box_state_dot", "Get forces on each body ###################################### finger_forces = jnp.zeros(2) box_forces = jnp.zeros(3) #", "= box_state[5] * jnp.array([[0, -1], [1, 0]]) @ p_BF # Get velocity of", "+= calc_box_ground_wrench( box_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d ) finger_forces += calc_finger_ground_force(", "v_WF - v_WB v_BF = R_WB.T @ v_BF_W # Get velocity of contact", "ground phi_finger_ground = jnp.minimum(jnp.zeros(1), p_WF[1]) # Get the contact forces. 
Approximate ground force", "= contact_force_W return box_wrench, finger_forces @jax.jit def box_single_finger_step( box_state, finger_state, finger_state_desired, finger_control_stiffness, ):", "vz) state of the finger finger2_state_desired_trace: N_steps x 2 array of desired (x_d,", "phi_finger_box = box_finger_signed_distance( box_state[:3], finger_state[:2], box_size ) # Clip to only consider negative", "current_box_state = box_state_trace[i - 1, :] current_finger_state = finger_state_trace[i - 1, :] current_finger_state_desired", "# Finger properties finger_mass_kg = 0.1 finger_control_damping = 2 # Contact properties mu_d", "box frame p_WF = finger_state[:2] p_WB = box_state[:2] p_BF_W = p_WF - p_WB", "state of the finger finger_state_desired_trace: N_steps x 2 array of desired (x_d, z_d)", "finger_forces = jnp.zeros(2) box_forces = jnp.zeros(3) # Gravitational force on each body finger_forces", "# top right [1, -1], # bottom left [1, 1], # bottom right", "box frame v_WF = finger_state[2:] v_WB = box_state[3:5] v_BF_W = v_WF - v_WB", "jnp.zeros(4) finger2_state_dot = jnp.zeros(4) # Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger1_state_dot = finger1_state_dot.at[:2].add(finger1_state[2:]) finger2_state_dot", "box_state: current (x, z, theta, vx, vz, omega) state of the box finger1_state:", "(x, z, theta, vx, vz, omega) state of the box finger1_state: current (x,", "box_state_initial: initial (x, z, theta, vx, vz, omega) state of the box finger_state_initial:", "Transform to world frame v_WC = (R_WB @ v_BC.T).T + jnp.tile(box_state[3:5], [4, 1])", "of the contact in the box frame right_or_up = p_BF[1] > -p_BF[0] left_or_up", "+= calc_finger_ground_force( finger2_state, mu_d, c, psi_s, contact_k, contact_d ) # Contact forces between", "# Find any that have negative z: min(0, signed distance) phi_corner_ground = jnp.minimum(jnp.zeros(4),", "= finger_state[2:] # Get penetration into ground phi_finger_ground = jnp.minimum(jnp.zeros(1), p_WF[1]) # Get", "direction as the relative velocity tangential_velocity = ( v_contactF_B - v_contactF_B.dot(normal) * normal", "R_WB.T p_BF = R_BW @ p_BF_W # Now get the signed distance x_dist", "finger_state_dot.at[:2].add(finger_state[2:]) # Forces box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg) finger_state_dot = finger_state_dot.at[2:].add(finger_forces / finger_mass_kg)", "the derivatives matrix box_state_dot = jnp.zeros(6) finger1_state_dot = jnp.zeros(4) finger2_state_dot = jnp.zeros(4) #", "between a finger and the ground. 
args: finger_state: current (x, z, theta, vx,", "update for box manipulation with one finger, using the penalty method for contact", "- 1] # get next state next_box_state, next_finger1_state, next_finger2_state = box_two_finger_step( current_box_state, current_finger1_state,", "v_WB = box_state[3:5] v_BF_W = v_WF - v_WB v_BF = R_WB.T @ v_BF_W", "* normal + tangent_force # transform into the world frame contact_force_W = R_WB", "+ dt * finger2_state_dot return new_box_state, new_finger1_state, new_finger2_state def box_two_finger_simulate( box_state_initial, finger1_state_initial, finger1_state_desired_trace,", "on each body finger_forces = finger_forces.at[1].add(-g * finger_mass_kg) box_forces = box_forces.at[1].add(-g * box_mass_kg)", "v_WC[i, 0] sticking_mask = jnp.abs(tangential_velocity) <= psi_s slipping_mask = jnp.logical_not(sticking_mask) * jnp.sign(tangential_velocity) mu", "2.0 psi_s = mu_d / c contact_k = 1000 contact_d = 2 *", "integrate ###################################### # Build the derivatives matrix box_state_dot = jnp.zeros(6) finger1_state_dot = jnp.zeros(4)", "contact normal_force = -contact_k * phi_finger_box # scalar, in normal direction normal_force =", "- 1, :] current_finger2_state_desired = finger2_state_desired_trace[i - 1] # get next state next_box_state,", "finger_state_trace = jnp.zeros((N_steps, 4)) # Store the initial conditions box_state_trace = box_state_trace.at[0, :].set(box_state_initial)", "Numerically integrate ###################################### # Build the derivatives matrix box_state_dot = jnp.zeros(6) finger_state_dot =", "= jnp.zeros(6) finger_state_dot = jnp.zeros(4) # Velocities box_state_dot = box_state_dot.at[:3].add(box_state[3:]) finger_state_dot = finger_state_dot.at[:2].add(finger_state[2:])", "state of the finger box_size: float indicating the side length of the box", "# Now get the signed distance x_dist = jnp.maximum(-(p_BF[0] + box_size / 2.0),", "box_pose[:2] theta_B = box_pose[2] p_BF_W = p_WF - p_WB # Rotate p_BF_W by", "and fingers finger1_wrench_on_box, box_force_on_finger1 = calc_box_finger_wrench( box_state, finger1_state, box_side_m, mu_d, c, psi_s, contact_k," ]
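# --- Usage sketch (not part of the original module) --------------------------
# A minimal, hypothetical example of driving the single-finger simulator and
# differentiating through the rollout with JAX. The initial states, target
# trace, step count, and stiffness value are made-up illustration values; only
# the function names and state layouts come from the module above.
import jax
import jax.numpy as jnp

N_steps = 500
box_state_0 = jnp.array([0.0, 0.25, 0.0, 0.0, 0.0, 0.0])  # (x, z, theta, vx, vz, omega)
finger_state_0 = jnp.array([-0.5, 0.5, 0.0, 0.0])  # (x, z, vx, vz)
# Hold the finger's desired position constant over the whole horizon
finger_target_trace = jnp.tile(jnp.array([0.0, 0.1]), (N_steps, 1))


def final_box_x(stiffness):
    box_trace, _ = box_single_finger_simulate(
        box_state_0, finger_state_0, finger_target_trace, stiffness, N_steps
    )
    return box_trace[-1, 0]


# Every step is built from jax.numpy ops, so the rollout can be differentiated
# end to end (the Python loop is simply unrolled when the function is traced).
stiffness_sensitivity = jax.grad(final_box_x)(10.0)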
# Generated by Django 3.1.5 on 2021-01-18 03:51

import django.core.validators
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('classes', '0003_auto_20210117_2058'),
    ]

    operations = [
        migrations.AlterField(
            model_name='class',
            name='cost',
            field=models.DecimalField(decimal_places=2, max_digits=8),
        ),
        migrations.AlterField(
            model_name='review',
            name='rating',
            field=models.DecimalField(decimal_places=1, max_digits=2, validators=[django.core.validators.MaxValueValidator(5), django.core.validators.MinValueValidator(0)]),
        ),
    ]
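# --- Context sketch (hypothetical, not generated by Django) ------------------
# For reference, a models.py excerpt consistent with the AlterField operations
# above. Only the two altered fields are shown; the actual Class and Review
# models in the 'classes' app presumably define other fields as well.
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models


class Class(models.Model):
    cost = models.DecimalField(max_digits=8, decimal_places=2)


class Review(models.Model):
    rating = models.DecimalField(
        max_digits=2,
        decimal_places=1,
        validators=[MaxValueValidator(5), MinValueValidator(0)],
    )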
[ "print(r[0]) # cur.close() # # #--- #act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 =", "?, 1) ||'-01' )\" # cur.execute( sttm, ( 3, ) ) # for", "end # ''' # # db_conn.execute_immediate( proc_ddl ) # db_conn.commit() # # cur=db_conn.cursor()", "#--- #act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = \"\"\" 2019-03-01 00:00:00 \"\"\" @pytest.mark.version('>=2.5')", "suspend; # end # ''' # # db_conn.execute_immediate( proc_ddl ) # db_conn.commit() #", "FB3 throws \"Datatypes are not comparable in expression\" in procedure parameters # decription:", "for r in cur: # print(r[0]) # cur.close() # # #--- #act_1 =", "# suspend; # end # ''' # # db_conn.execute_immediate( proc_ddl ) # db_conn.commit()", "= a_dts; # suspend; # end # ''' # # db_conn.execute_immediate( proc_ddl )", "#--- # # proc_ddl=''' # create or alter procedure test_proc ( a_dts timestamp)", "[] init_script_1 = \"\"\"\"\"\" db_1 = db_factory(sql_dialect=3, init=init_script_1) # test_script_1 #--- # #", "on 4.0.0.1573; 3.0.x is still affected # # tracker_id: CORE-6108 # min_versions: ['2.5']", "''' # # db_conn.execute_immediate( proc_ddl ) # db_conn.commit() # # cur=db_conn.cursor() # #", "min_versions: ['2.5'] # versions: 2.5 # qmid: None import pytest from firebird.qa import", "alter procedure test_proc ( a_dts timestamp) returns ( o_dts timestamp) as # begin", "resources: None substitutions_1 = [] init_script_1 = \"\"\"\"\"\" db_1 = db_factory(sql_dialect=3, init=init_script_1) #", "import pytest from firebird.qa import db_factory, isql_act, Action # version: 2.5 # resources:", "test_script_1 #--- # # proc_ddl=''' # create or alter procedure test_proc ( a_dts", "timestamp) as # begin # o_dts = a_dts; # suspend; # end #", "= \"\"\"\"\"\" db_1 = db_factory(sql_dialect=3, init=init_script_1) # test_script_1 #--- # # proc_ddl=''' #", "pytest from firebird.qa import db_factory, isql_act, Action # version: 2.5 # resources: None", "bug on 4.0.0.1567; 3.0.5.33160. # Works fine on 4.0.0.1573; 3.0.x is still affected", "# db_conn.commit() # # cur=db_conn.cursor() # # sttm=\"select o_dts from test_proc('2019-'|| COALESCE( ?,", "# # proc_ddl=''' # create or alter procedure test_proc ( a_dts timestamp) returns", "Confirmed bug on 4.0.0.1567; 3.0.5.33160. # Works fine on 4.0.0.1573; 3.0.x is still", "= [] init_script_1 = \"\"\"\"\"\" db_1 = db_factory(sql_dialect=3, init=init_script_1) # test_script_1 #--- #", "# print(r[0]) # cur.close() # # #--- #act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1", "3.0.x is still affected # # tracker_id: CORE-6108 # min_versions: ['2.5'] # versions:", "# min_versions: ['2.5'] # versions: 2.5 # qmid: None import pytest from firebird.qa", "# #--- #act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = \"\"\" 2019-03-01 00:00:00 \"\"\"", "4.0.0.1567; 3.0.5.33160. # Works fine on 4.0.0.1573; 3.0.x is still affected # #", "decription: # Confirmed bug on 4.0.0.1567; 3.0.5.33160. 
# Works fine on 4.0.0.1573; 3.0.x", "returns ( o_dts timestamp) as # begin # o_dts = a_dts; # suspend;", "cur: # print(r[0]) # cur.close() # # #--- #act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)", "test_proc ( a_dts timestamp) returns ( o_dts timestamp) as # begin # o_dts", "# # sttm=\"select o_dts from test_proc('2019-'|| COALESCE( ?, 1) ||'-01' )\" # cur.execute(", "# ''' # # db_conn.execute_immediate( proc_ddl ) # db_conn.commit() # # cur=db_conn.cursor() #", "3.0.5.33160. # Works fine on 4.0.0.1573; 3.0.x is still affected # # tracker_id:", "# resources: None substitutions_1 = [] init_script_1 = \"\"\"\"\"\" db_1 = db_factory(sql_dialect=3, init=init_script_1)", "o_dts timestamp) as # begin # o_dts = a_dts; # suspend; # end", "\"Datatypes are not comparable in expression\" in procedure parameters # decription: # Confirmed", "# proc_ddl=''' # create or alter procedure test_proc ( a_dts timestamp) returns (", ") ) # for r in cur: # print(r[0]) # cur.close() # #", "Action # version: 2.5 # resources: None substitutions_1 = [] init_script_1 = \"\"\"\"\"\"", "# cur=db_conn.cursor() # # sttm=\"select o_dts from test_proc('2019-'|| COALESCE( ?, 1) ||'-01' )\"", "Regression: FB3 throws \"Datatypes are not comparable in expression\" in procedure parameters #", "None substitutions_1 = [] init_script_1 = \"\"\"\"\"\" db_1 = db_factory(sql_dialect=3, init=init_script_1) # test_script_1", "# sttm=\"select o_dts from test_proc('2019-'|| COALESCE( ?, 1) ||'-01' )\" # cur.execute( sttm,", "( a_dts timestamp) returns ( o_dts timestamp) as # begin # o_dts =", "CORE-6108 # min_versions: ['2.5'] # versions: 2.5 # qmid: None import pytest from", "a_dts timestamp) returns ( o_dts timestamp) as # begin # o_dts = a_dts;", "begin # o_dts = a_dts; # suspend; # end # ''' # #", "# # id: bugs.core_6108 # title: Regression: FB3 throws \"Datatypes are not comparable", "# # tracker_id: CORE-6108 # min_versions: ['2.5'] # versions: 2.5 # qmid: None", "in cur: # print(r[0]) # cur.close() # # #--- #act_1 = python_act('db_1', test_script_1,", ") # db_conn.commit() # # cur=db_conn.cursor() # # sttm=\"select o_dts from test_proc('2019-'|| COALESCE(", "not comparable in expression\" in procedure parameters # decription: # Confirmed bug on", "tracker_id: CORE-6108 # min_versions: ['2.5'] # versions: 2.5 # qmid: None import pytest", "# end # ''' # # db_conn.execute_immediate( proc_ddl ) # db_conn.commit() # #", "or alter procedure test_proc ( a_dts timestamp) returns ( o_dts timestamp) as #", "cur=db_conn.cursor() # # sttm=\"select o_dts from test_proc('2019-'|| COALESCE( ?, 1) ||'-01' )\" #", "expected_stdout_1 = \"\"\" 2019-03-01 00:00:00 \"\"\" @pytest.mark.version('>=2.5') @pytest.mark.xfail def test_1(db_1): pytest.fail(\"Test not IMPLEMENTED\")", "proc_ddl=''' # create or alter procedure test_proc ( a_dts timestamp) returns ( o_dts", "comparable in expression\" in procedure parameters # decription: # Confirmed bug on 4.0.0.1567;", ")\" # cur.execute( sttm, ( 3, ) ) # for r in cur:", "expression\" in procedure parameters # decription: # Confirmed bug on 4.0.0.1567; 3.0.5.33160. #", "2.5 # qmid: None import pytest from firebird.qa import db_factory, isql_act, Action #", "db_conn.commit() # # cur=db_conn.cursor() # # sttm=\"select o_dts from test_proc('2019-'|| COALESCE( ?, 1)", "title: Regression: FB3 throws \"Datatypes are not comparable in expression\" in procedure parameters", ") # for r in cur: # print(r[0]) # cur.close() # # #---", "on 4.0.0.1567; 3.0.5.33160. 
# Works fine on 4.0.0.1573; 3.0.x is still affected #", "id: bugs.core_6108 # title: Regression: FB3 throws \"Datatypes are not comparable in expression\"", "4.0.0.1573; 3.0.x is still affected # # tracker_id: CORE-6108 # min_versions: ['2.5'] #", "parameters # decription: # Confirmed bug on 4.0.0.1567; 3.0.5.33160. # Works fine on", "# title: Regression: FB3 throws \"Datatypes are not comparable in expression\" in procedure", "# id: bugs.core_6108 # title: Regression: FB3 throws \"Datatypes are not comparable in", "affected # # tracker_id: CORE-6108 # min_versions: ['2.5'] # versions: 2.5 # qmid:", "isql_act, Action # version: 2.5 # resources: None substitutions_1 = [] init_script_1 =", "o_dts from test_proc('2019-'|| COALESCE( ?, 1) ||'-01' )\" # cur.execute( sttm, ( 3,", "# test_script_1 #--- # # proc_ddl=''' # create or alter procedure test_proc (", "substitutions_1 = [] init_script_1 = \"\"\"\"\"\" db_1 = db_factory(sql_dialect=3, init=init_script_1) # test_script_1 #---", "# cur.execute( sttm, ( 3, ) ) # for r in cur: #", "||'-01' )\" # cur.execute( sttm, ( 3, ) ) # for r in", "# Works fine on 4.0.0.1573; 3.0.x is still affected # # tracker_id: CORE-6108", "throws \"Datatypes are not comparable in expression\" in procedure parameters # decription: #", "test_script_1, substitutions=substitutions_1) expected_stdout_1 = \"\"\" 2019-03-01 00:00:00 \"\"\" @pytest.mark.version('>=2.5') @pytest.mark.xfail def test_1(db_1): pytest.fail(\"Test", "as # begin # o_dts = a_dts; # suspend; # end # '''", "substitutions=substitutions_1) expected_stdout_1 = \"\"\" 2019-03-01 00:00:00 \"\"\" @pytest.mark.version('>=2.5') @pytest.mark.xfail def test_1(db_1): pytest.fail(\"Test not", "cur.execute( sttm, ( 3, ) ) # for r in cur: # print(r[0])", "COALESCE( ?, 1) ||'-01' )\" # cur.execute( sttm, ( 3, ) ) #", "# # db_conn.execute_immediate( proc_ddl ) # db_conn.commit() # # cur=db_conn.cursor() # # sttm=\"select", "db_factory, isql_act, Action # version: 2.5 # resources: None substitutions_1 = [] init_script_1", "sttm=\"select o_dts from test_proc('2019-'|| COALESCE( ?, 1) ||'-01' )\" # cur.execute( sttm, (", "# for r in cur: # print(r[0]) # cur.close() # # #--- #act_1", "# cur.close() # # #--- #act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = \"\"\"", "still affected # # tracker_id: CORE-6108 # min_versions: ['2.5'] # versions: 2.5 #", "in expression\" in procedure parameters # decription: # Confirmed bug on 4.0.0.1567; 3.0.5.33160.", "qmid: None import pytest from firebird.qa import db_factory, isql_act, Action # version: 2.5", "a_dts; # suspend; # end # ''' # # db_conn.execute_immediate( proc_ddl ) #", "# begin # o_dts = a_dts; # suspend; # end # ''' #", "2.5 # resources: None substitutions_1 = [] init_script_1 = \"\"\"\"\"\" db_1 = db_factory(sql_dialect=3,", "# create or alter procedure test_proc ( a_dts timestamp) returns ( o_dts timestamp)", "proc_ddl ) # db_conn.commit() # # cur=db_conn.cursor() # # sttm=\"select o_dts from test_proc('2019-'||", "create or alter procedure test_proc ( a_dts timestamp) returns ( o_dts timestamp) as", "bugs.core_6108 # title: Regression: FB3 throws \"Datatypes are not comparable in expression\" in", "1) ||'-01' )\" # cur.execute( sttm, ( 3, ) ) # for r", "# qmid: None import pytest from firebird.qa import db_factory, isql_act, Action # version:", "r in cur: # print(r[0]) # cur.close() # # #--- #act_1 = python_act('db_1',", "# # #--- #act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = 
\"\"\" 2019-03-01 00:00:00", "# o_dts = a_dts; # suspend; # end # ''' # # db_conn.execute_immediate(", "in procedure parameters # decription: # Confirmed bug on 4.0.0.1567; 3.0.5.33160. # Works", "# # cur=db_conn.cursor() # # sttm=\"select o_dts from test_proc('2019-'|| COALESCE( ?, 1) ||'-01'", "import db_factory, isql_act, Action # version: 2.5 # resources: None substitutions_1 = []", "3, ) ) # for r in cur: # print(r[0]) # cur.close() #", "Works fine on 4.0.0.1573; 3.0.x is still affected # # tracker_id: CORE-6108 #", "init=init_script_1) # test_script_1 #--- # # proc_ddl=''' # create or alter procedure test_proc", "are not comparable in expression\" in procedure parameters # decription: # Confirmed bug", "firebird.qa import db_factory, isql_act, Action # version: 2.5 # resources: None substitutions_1 =", "sttm, ( 3, ) ) # for r in cur: # print(r[0]) #", "#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = \"\"\" 2019-03-01 00:00:00 \"\"\" @pytest.mark.version('>=2.5') @pytest.mark.xfail", "timestamp) returns ( o_dts timestamp) as # begin # o_dts = a_dts; #", "python_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = \"\"\" 2019-03-01 00:00:00 \"\"\" @pytest.mark.version('>=2.5') @pytest.mark.xfail def test_1(db_1):", "db_1 = db_factory(sql_dialect=3, init=init_script_1) # test_script_1 #--- # # proc_ddl=''' # create or", "#coding:utf-8 # # id: bugs.core_6108 # title: Regression: FB3 throws \"Datatypes are not", "test_proc('2019-'|| COALESCE( ?, 1) ||'-01' )\" # cur.execute( sttm, ( 3, ) )", "# decription: # Confirmed bug on 4.0.0.1567; 3.0.5.33160. # Works fine on 4.0.0.1573;", "versions: 2.5 # qmid: None import pytest from firebird.qa import db_factory, isql_act, Action", "( 3, ) ) # for r in cur: # print(r[0]) # cur.close()", "# version: 2.5 # resources: None substitutions_1 = [] init_script_1 = \"\"\"\"\"\" db_1", "init_script_1 = \"\"\"\"\"\" db_1 = db_factory(sql_dialect=3, init=init_script_1) # test_script_1 #--- # # proc_ddl='''", "# db_conn.execute_immediate( proc_ddl ) # db_conn.commit() # # cur=db_conn.cursor() # # sttm=\"select o_dts", "fine on 4.0.0.1573; 3.0.x is still affected # # tracker_id: CORE-6108 # min_versions:", "None import pytest from firebird.qa import db_factory, isql_act, Action # version: 2.5 #", "= python_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = \"\"\" 2019-03-01 00:00:00 \"\"\" @pytest.mark.version('>=2.5') @pytest.mark.xfail def", "# Confirmed bug on 4.0.0.1567; 3.0.5.33160. # Works fine on 4.0.0.1573; 3.0.x is", "( o_dts timestamp) as # begin # o_dts = a_dts; # suspend; #", "procedure parameters # decription: # Confirmed bug on 4.0.0.1567; 3.0.5.33160. 
# Works fine", "# versions: 2.5 # qmid: None import pytest from firebird.qa import db_factory, isql_act,", "o_dts = a_dts; # suspend; # end # ''' # # db_conn.execute_immediate( proc_ddl", "procedure test_proc ( a_dts timestamp) returns ( o_dts timestamp) as # begin #", "version: 2.5 # resources: None substitutions_1 = [] init_script_1 = \"\"\"\"\"\" db_1 =", "['2.5'] # versions: 2.5 # qmid: None import pytest from firebird.qa import db_factory,", "from firebird.qa import db_factory, isql_act, Action # version: 2.5 # resources: None substitutions_1", "is still affected # # tracker_id: CORE-6108 # min_versions: ['2.5'] # versions: 2.5", "= db_factory(sql_dialect=3, init=init_script_1) # test_script_1 #--- # # proc_ddl=''' # create or alter", "db_conn.execute_immediate( proc_ddl ) # db_conn.commit() # # cur=db_conn.cursor() # # sttm=\"select o_dts from", "cur.close() # # #--- #act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = \"\"\" 2019-03-01", "\"\"\"\"\"\" db_1 = db_factory(sql_dialect=3, init=init_script_1) # test_script_1 #--- # # proc_ddl=''' # create", "# tracker_id: CORE-6108 # min_versions: ['2.5'] # versions: 2.5 # qmid: None import", "db_factory(sql_dialect=3, init=init_script_1) # test_script_1 #--- # # proc_ddl=''' # create or alter procedure", "from test_proc('2019-'|| COALESCE( ?, 1) ||'-01' )\" # cur.execute( sttm, ( 3, )" ]
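# A runnable sketch of the commented-out reproduction above, assuming an open
# DB-API style Firebird connection `db_conn` (for example one created with the
# fdb driver) is passed in; this is an illustration, not part of the original test.
def reproduce_core_6108(db_conn):
    proc_ddl = '''
        create or alter procedure test_proc (a_dts timestamp) returns (o_dts timestamp) as
        begin
            o_dts = a_dts;
            suspend;
        end
    '''
    db_conn.execute_immediate(proc_ddl)
    db_conn.commit()

    cur = db_conn.cursor()
    # On affected builds this raises "Datatypes are not comparable in expression";
    # on fixed builds it prints 2019-03-01 00:00:00.
    cur.execute("select o_dts from test_proc('2019-' || coalesce(?, 1) || '-01')", (3,))
    for r in cur:
        print(r[0])
    cur.close()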
[ "competitive_cpc_base_classes = \\ initialize_priors(params, module_class) # Module setup for the simulator mods =", "AuctionAttributesModule from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_module import VickreyAuctionModule from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_base_module import \\ CompetitiveClickProbabilityTwoClassGeometricModule from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_base_module import", "Params = module_class.Params params = Params(avg_rpv=300.0, noise_level=100.0) revenue_priors, revenue_base_classes = initialize_priors( params, module_class)", "\\ AuctionAttributesModule from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_module import VickreyAuctionModule from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_base_module import \\ CompetitiveClickProbabilityTwoClassGeometricModule from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_base_module", "attr_set = simulator_setup_1day() # build \"policies\" list that contains all bidding policies policy1", "set) :return: simulator, state set, action set, attribute set \"\"\" from ssa_sim_v2.simulator.modules.auctions.auctions_base_module import", "\\ VickreyAuctionDateHoWModule from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_date_how_module import \\ CompetitiveClickProbabilityDateHoWModule from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_date_how_module import \\ CompetitiveClicksDateHoWModule from", "from ssa_sim_v2.simulator.attribute import AttrSet seed = 1111 date_from = \"2018-01-01\" date_to = \"2018-01-02\"", "import ConversionsDateHoWModule from ssa_sim_v2.simulator.modules.revenue.revenue_date_how_module import RevenueDateHoWModule from ssa_sim_v2.simulator.modules.competitive_cpc.competitive_cpc_date_how_module import \\ CompetitiveCpcDateHoWModule from ssa_sim_v2.simulator.competitive_date_how_simulator", "+ dates[\"hour_of_day\"] dates[\"date\"] = dates[\"date\"].dt.strftime(\"%Y-%m-%d\") dates = dates[[\"date\", \"hour_of_week\"]] # Initialize state set", "= dates.copy() base_classes.loc[:, \"base_class\"] = base_class return priors, base_classes # Initialize auctions priors", "AttrSet(names, vals) attr_combinations = attr_set.get_all_attr_tuples() # Initialize action set action_set = ActionSet(attr_set, max_bid=9.99,", "attribute set \"\"\" from ssa_sim_v2.simulator.modules.auctions.auctions_base_module import AuctionsPoissonModule from ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_base_module import \\ AuctionAttributesModule from", "results[ix]) # each policy will learn with result # note that policy in", "= initialize_priors(params, module_class) # Initialize competitive_clicks priors module_class = CompetitiveClicksBinomialModule Params = module_class.Params", "attribute set) :return: simulator, state set, action set, attribute set \"\"\" from ssa_sim_v2.simulator.modules.auctions.auctions_base_module", "Params = module_class.Params params = Params() vickrey_auction_priors, vickrey_auction_base_classes \\ = initialize_priors(params, module_class) #", "auction_attributes priors module_class = AuctionAttributesModule Params = module_class.Params params = Params(p=1.0) # Probabilities", "fee=0.01) competitive_cpc_priors, competitive_cpc_base_classes = \\ initialize_priors(params, module_class) # Module setup for the simulator", "policies \"\"\" # import 
policy classes from files from policy2019 import Policy2019 from", "auction_attributes_base_classes, seed), \"vickrey_auction\": VickreyAuctionDateHoWModule(vickrey_auction_priors, vickrey_auction_base_classes, seed), \"competitive_click_probability\": CompetitiveClickProbabilityDateHoWModule( competitive_click_probability_priors, competitive_click_probability_base_classes, seed), \"competitive_clicks\": CompetitiveClicksDateHoWModule(competitive_clicks_priors,", "import \\ CompetitiveClickProbabilityTwoClassGeometricModule from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_base_module import \\ CompetitiveClicksBinomialModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_base_module import ConversionRateFlatModule from", "\\ AuctionAttributesDateHoWModule from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_date_how_module import \\ VickreyAuctionDateHoWModule from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_date_how_module import \\ CompetitiveClickProbabilityDateHoWModule from", "= ConversionRateFlatModule Params = module_class.Params params = Params(cvr=0.02, noise_level=0.0, noise_type=\"multiplicative\") conversion_rate_priors, conversion_rate_base_classes \\", "\"how\"], [\"discrete\", \"discrete\"], [dates_list, list(range(168))]) # Initialize attribute set names = ['gender', 'age']", "learn with result # note that policy in index ix gets result in", "[{(): params}] * len(priors) base_classes = dates.copy() base_classes.loc[:, \"base_class\"] = module_class auctions_priors =", "competitive_click_probability_priors, competitive_click_probability_base_classes, seed), \"competitive_clicks\": CompetitiveClicksDateHoWModule(competitive_clicks_priors, competitive_clicks_base_classes, seed), \"conversion_rate\": ConversionRateDateHoWModule(conversion_rate_priors, conversion_rate_base_classes, seed), \"conversions\": ConversionsDateHoWModule(conversions_priors,", "= \"2018-01-02\" tmp_df = pd.DataFrame(np.array(range(24)), columns=[\"hour_of_day\"]) tmp_df[\"key\"] = 1 dates = pd.DataFrame(pd.date_range(date_from, date_to),", "attr_set if __name__ == \"__main__\": \"\"\" This script shows how the bidding policies", "noise_level=100.0) revenue_priors, revenue_base_classes = initialize_priors( params, module_class) # Initialize competitive_cpc priors module_class =", "AuctionsPoissonModule from ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_base_module import \\ AuctionAttributesModule from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_module import VickreyAuctionModule from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_base_module import", "r_12=0.4, r_2=0.5) competitive_click_probability_priors, competitive_click_probability_base_classes \\ = initialize_priors(params, module_class) # Initialize competitive_clicks priors module_class", "= module_class.Params params = Params(cvr=0.02, noise_level=0.0, noise_type=\"multiplicative\") conversion_rate_priors, conversion_rate_base_classes \\ = initialize_priors(params, module_class)", "\\ CompetitiveClicksDateHoWModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_date_how_module import \\ ConversionRateDateHoWModule from ssa_sim_v2.simulator.modules.conversions.conversions_date_how_module import ConversionsDateHoWModule from ssa_sim_v2.simulator.modules.revenue.revenue_date_how_module", "params = Params(p=1.0) # Probabilities are normalized 
auction_attributes_priors, auction_attributes_base_classes \\ = initialize_priors(params, module_class)", "dates.copy() base_classes.loc[:, \"base_class\"] = module_class auctions_priors = priors auctions_base_classes = base_classes # Initialize", "set, action set, attribute set \"\"\" from ssa_sim_v2.simulator.modules.auctions.auctions_base_module import AuctionsPoissonModule from ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_base_module import", "priors = dates.copy() priors.loc[:, \"prior\"] = pd.Series([dict.fromkeys(attr_combinations, params)] * len(priors)) base_classes = dates.copy()", "min_mod=0.1) def initialize_priors(params, base_class): attr_combinations = list(attr_set.get_all_attr_tuples()) priors = dates.copy() priors.loc[:, \"prior\"] =", "and problem definition (state set, action set, and attribute set) :return: simulator, state", "= module_class.Params params = Params() vickrey_auction_priors, vickrey_auction_base_classes \\ = initialize_priors(params, module_class) # Initialize", "initialize_priors( params, module_class) # Initialize competitive_cpc priors module_class = CompetitiveCPCVickreyModule Params = module_class.Params", "seed = 1111 date_from = \"2018-01-01\" date_to = \"2018-01-02\" tmp_df = pd.DataFrame(np.array(range(24)), columns=[\"hour_of_day\"])", "# Initialize competitive_cpc priors module_class = CompetitiveCPCVickreyModule Params = module_class.Params params = Params(n_pos=8,", "import pandas as pd def simulator_setup_1day(): \"\"\" This is a tool to set", "PolicyThompsonSamplingSI(state_set, action_set, attr_set, seed=1234) policy2.initialize({\"stp\": {\"cvr_default\": 0.02, \"rpv_default\": 300.0}}) # this policy is", "and attribute set) :return: simulator, state set, action set, attribute set \"\"\" from", "Initialize vickrey_auction priors module_class = VickreyAuctionModule Params = module_class.Params params = Params() vickrey_auction_priors,", "CompetitiveClickProbabilityDateHoWModule from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_date_how_module import \\ CompetitiveClicksDateHoWModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_date_how_module import \\ ConversionRateDateHoWModule from ssa_sim_v2.simulator.modules.conversions.conversions_date_how_module", "the bidding policies will interact with the simulator The codes are written out", "params, module_class) # Initialize competitive_cpc priors module_class = CompetitiveCPCVickreyModule Params = module_class.Params params", "Params = module_class.Params params = Params(p=1.0) # Probabilities are normalized auction_attributes_priors, auction_attributes_base_classes \\", "StateSet from ssa_sim_v2.simulator.action import ActionSet from ssa_sim_v2.simulator.attribute import AttrSet seed = 1111 date_from", "\"\"\" This is a tool to set up a simulator and problem definition", "priors auctions_base_classes = base_classes # Initialize auction_attributes priors module_class = AuctionAttributesModule Params =", "pd.Series([dict.fromkeys(attr_combinations, params)] * len(priors)) base_classes = dates.copy() base_classes.loc[:, \"base_class\"] = base_class return priors,", "T up to 48. T>48 will cause an error. 
for t in range(T):", "Params(n_pos=8, fee=0.01) competitive_cpc_priors, competitive_cpc_base_classes = \\ initialize_priors(params, module_class) # Module setup for the", "all for you simulator, state_set, action_set, attr_set = simulator_setup_1day() # build \"policies\" list", "= CompetitiveDateHowSimulator(state_set, action_set, attr_set, mods, date_from, date_to, income_share=1.0) return simulator, state_set, action_set, attr_set", "\"policies\" list that contains all bidding policies policy1 = Policy2019(state_set, action_set, attr_set, seed=1234)", "ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_base_module import \\ CompetitiveClickProbabilityTwoClassGeometricModule from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_base_module import \\ CompetitiveClicksBinomialModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_base_module import ConversionRateFlatModule", "conversions_base_classes \\ = initialize_priors(params, module_class) # Initialize revenue priors module_class = RevenueGammaNoiseModule Params", "# each policy will learn with result # note that policy in index", "seed), \"auction_attributes\": AuctionAttributesDateHoWModule(auction_attributes_priors, auction_attributes_base_classes, seed), \"vickrey_auction\": VickreyAuctionDateHoWModule(vickrey_auction_priors, vickrey_auction_base_classes, seed), \"competitive_click_probability\": CompetitiveClickProbabilityDateHoWModule( competitive_click_probability_priors, competitive_click_probability_base_classes,", "import \\ AuctionAttributesDateHoWModule from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_date_how_module import \\ VickreyAuctionDateHoWModule from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_date_how_module import \\ CompetitiveClickProbabilityDateHoWModule", "to set up a simulator and problem definition (state set, action set, and", "params = Params() vickrey_auction_priors, vickrey_auction_base_classes \\ = initialize_priors(params, module_class) # Initialize competitive_click_probability priors", "= Policy2019(state_set, action_set, attr_set, seed=1234) # this policy is a bare-bone sample policy", "priors module_class = AuctionAttributesModule Params = module_class.Params params = Params(p=1.0) # Probabilities are", "are normalized auction_attributes_priors, auction_attributes_base_classes \\ = initialize_priors(params, module_class) # Initialize vickrey_auction priors module_class", "Policy2019(state_set, action_set, attr_set, seed=1234) # this policy is a bare-bone sample policy that", "in enumerate(policies): p.learn(s, results[ix]) # each policy will learn with result # note", "mods = \\ {\"auctions\": AuctionsDateHoWModule(auctions_priors, auctions_base_classes, seed), \"auction_attributes\": AuctionAttributesDateHoWModule(auction_attributes_priors, auction_attributes_base_classes, seed), \"vickrey_auction\": VickreyAuctionDateHoWModule(vickrey_auction_priors,", "policy_thompson import PolicyThompsonSamplingSI # handy function that initializes all for you simulator, state_set,", "this policy is one of production level policies that needs this extra step", "import VickreyAuctionModule from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_base_module import \\ CompetitiveClickProbabilityTwoClassGeometricModule from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_base_module import \\ CompetitiveClicksBinomialModule 
from", "AuctionAttributesModule Params = module_class.Params params = Params(p=1.0) # Probabilities are normalized auction_attributes_priors, auction_attributes_base_classes", "vals = {'gender': ['M', 'F', 'U'], 'age': ['0-19', '20-29', '30-39', '40-49', '50-59', '60-69',", "module_class = AuctionAttributesModule Params = module_class.Params params = Params(p=1.0) # Probabilities are normalized", "'age': ['0-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-*']} attr_set = AttrSet(names, vals) attr_combinations", "bidding policies policy1 = Policy2019(state_set, action_set, attr_set, seed=1234) # this policy is a", "columns=[\"date\"]) dates_list = dates[\"date\"].tolist() dates[\"key\"] = 1 dates = pd.merge(dates, tmp_df, on=[\"key\"], how=\"left\")", "= AttrSet(names, vals) attr_combinations = attr_set.get_all_attr_tuples() # Initialize action set action_set = ActionSet(attr_set,", "'60-69', '70-*']} attr_set = AttrSet(names, vals) attr_combinations = attr_set.get_all_attr_tuples() # Initialize action set", "1111 date_from = \"2018-01-01\" date_to = \"2018-01-02\" tmp_df = pd.DataFrame(np.array(range(24)), columns=[\"hour_of_day\"]) tmp_df[\"key\"] =", "understanding and convenient debugging for your policies \"\"\" # import policy classes from", "# Initialize conversion_rate priors module_class = ConversionRateFlatModule Params = module_class.Params params = Params(cvr=0.02,", "attr_set, mods, date_from, date_to, income_share=1.0) return simulator, state_set, action_set, attr_set if __name__ ==", "seed=1234) policy2.initialize({\"stp\": {\"cvr_default\": 0.02, \"rpv_default\": 300.0}}) # this policy is one of production", "\\ CompetitiveClickProbabilityDateHoWModule from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_date_how_module import \\ CompetitiveClicksDateHoWModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_date_how_module import \\ ConversionRateDateHoWModule from", "priors module_class = VickreyAuctionModule Params = module_class.Params params = Params() vickrey_auction_priors, vickrey_auction_base_classes \\", "params = Params(avg_rpv=300.0, noise_level=100.0) revenue_priors, revenue_base_classes = initialize_priors( params, module_class) # Initialize competitive_cpc", "= CompetitiveClicksBinomialModule Params = module_class.Params params = Params(noise_level=0.0, noise_type=\"multiplicative\") competitive_clicks_priors, competitive_clicks_base_classes \\ =", "# this policy is one of production level policies that needs this extra", "\"competitive_cpc\": CompetitiveCpcDateHoWModule(competitive_cpc_priors, competitive_cpc_base_classes, seed) } simulator = CompetitiveDateHowSimulator(state_set, action_set, attr_set, mods, date_from, date_to,", "\"conversions\": ConversionsDateHoWModule(conversions_priors, conversions_base_classes, seed), \"revenue\": RevenueDateHoWModule(revenue_priors, revenue_base_classes, seed), \"competitive_cpc\": CompetitiveCpcDateHoWModule(competitive_cpc_priors, competitive_cpc_base_classes, seed) }", "Actions={}\".format(actions)) results = simulator.step(actions) for ix, p in enumerate(policies): p.learn(s, results[ix]) # each", "= \"2018-01-01\" date_to = \"2018-01-02\" tmp_df = pd.DataFrame(np.array(range(24)), columns=[\"hour_of_day\"]) tmp_df[\"key\"] = 1 dates", "bid actions.append(pol_action) print(\" Actions={}\".format(actions)) results = simulator.step(actions) for ix, p in enumerate(policies): p.learn(s,", "priors module_class = ConversionRateFlatModule Params = module_class.Params params = 
Params(cvr=0.02, noise_level=0.0, noise_type=\"multiplicative\") conversion_rate_priors,", "policies.append(policy2) policies.append(Policy2019(state_set, action_set, attr_set, seed=9292)) # adding another policy2019 with different seed on-the-fly", "Initialize auctions priors module_class = AuctionsPoissonModule Params = module_class.Params params = Params(auctions=100) priors", "['gender', 'age'] vals = {'gender': ['M', 'F', 'U'], 'age': ['0-19', '20-29', '30-39', '40-49',", "for your policies \"\"\" # import policy classes from files from policy2019 import", "competitive_click_probability_base_classes, seed), \"competitive_clicks\": CompetitiveClicksDateHoWModule(competitive_clicks_priors, competitive_clicks_base_classes, seed), \"conversion_rate\": ConversionRateDateHoWModule(conversion_rate_priors, conversion_rate_base_classes, seed), \"conversions\": ConversionsDateHoWModule(conversions_priors, conversions_base_classes,", "up a simulator and problem definition (state set, action set, and attribute set)", "conversions priors module_class = ConversionsBinomialModule Params = module_class.Params params = Params(noise_level=0.0, noise_type=\"multiplicative\") conversions_priors,", "day) T = 24 # note that this particular setup limits T up", "from policy_thompson import PolicyThompsonSamplingSI # handy function that initializes all for you simulator,", "to 48. T>48 will cause an error. for t in range(T): s =", "state={}\".format(simulator.state)) actions = [] for p in policies: pol_action = p.act(s) # each", "\\ = initialize_priors(params, module_class) # Initialize vickrey_auction priors module_class = VickreyAuctionModule Params =", "r_2=0.5) competitive_click_probability_priors, competitive_click_probability_base_classes \\ = initialize_priors(params, module_class) # Initialize competitive_clicks priors module_class =", "auctions_base_classes = base_classes # Initialize auction_attributes priors module_class = AuctionAttributesModule Params = module_class.Params", "seed=1234) # this policy is a bare-bone sample policy that bids randomly without", "= p.act(s) # each policy responds with a bid actions.append(pol_action) print(\" Actions={}\".format(actions)) results", "CompetitiveClicksBinomialModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_base_module import ConversionRateFlatModule from ssa_sim_v2.simulator.modules.conversions.conversions_base_module import ConversionsBinomialModule from ssa_sim_v2.simulator.modules.revenue.revenue_base_module import RevenueGammaNoiseModule", "= module_class.Params params = Params(n_pos=8, p=0.5, q=0.5, r_11=0.6, r_12=0.4, r_2=0.5) competitive_click_probability_priors, competitive_click_probability_base_classes \\", "module_class = CompetitiveCPCVickreyModule Params = module_class.Params params = Params(n_pos=8, fee=0.01) competitive_cpc_priors, competitive_cpc_base_classes =", "return priors, base_classes # Initialize auctions priors module_class = AuctionsPoissonModule Params = module_class.Params", "This script shows how the bidding policies will interact with the simulator The", "simulator, state_set, action_set, attr_set if __name__ == \"__main__\": \"\"\" This script shows how", "Module setup for the simulator mods = \\ {\"auctions\": AuctionsDateHoWModule(auctions_priors, auctions_base_classes, seed), \"auction_attributes\":", "\"base_class\"] = module_class auctions_priors = priors auctions_base_classes = base_classes # Initialize auction_attributes priors", "= module_class auctions_priors = priors auctions_base_classes 
= base_classes # Initialize auction_attributes priors module_class", "Params = module_class.Params params = Params(cvr=0.02, noise_level=0.0, noise_type=\"multiplicative\") conversion_rate_priors, conversion_rate_base_classes \\ = initialize_priors(params,", "The codes are written out for easier understanding and convenient debugging for your", "list that contains all bidding policies policy1 = Policy2019(state_set, action_set, attr_set, seed=1234) #", "AuctionsPoissonModule Params = module_class.Params params = Params(auctions=100) priors = dates.copy() priors.loc[:, \"prior\"] =", "0.02, \"rpv_default\": 300.0}}) # this policy is one of production level policies that", "a simulator and problem definition (state set, action set, and attribute set) :return:", "initialize_priors(params, module_class) # Initialize conversions priors module_class = ConversionsBinomialModule Params = module_class.Params params", "s = simulator.state print(\"t={} of {}\".format(t, T)) print(\" state={}\".format(simulator.state)) actions = [] for", "= base_class return priors, base_classes # Initialize auctions priors module_class = AuctionsPoissonModule Params", "script shows how the bidding policies will interact with the simulator The codes", "dates.copy() base_classes.loc[:, \"base_class\"] = base_class return priors, base_classes # Initialize auctions priors module_class", "conversions_base_classes, seed), \"revenue\": RevenueDateHoWModule(revenue_priors, revenue_base_classes, seed), \"competitive_cpc\": CompetitiveCpcDateHoWModule(competitive_cpc_priors, competitive_cpc_base_classes, seed) } simulator =", "date_from = \"2018-01-01\" date_to = \"2018-01-02\" tmp_df = pd.DataFrame(np.array(range(24)), columns=[\"hour_of_day\"]) tmp_df[\"key\"] = 1", "that bids randomly without learning policy2 = PolicyThompsonSamplingSI(state_set, action_set, attr_set, seed=1234) policy2.initialize({\"stp\": {\"cvr_default\":", "from ssa_sim_v2.simulator.modules.auctions.auctions_date_how_module import AuctionsDateHoWModule from ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_date_how_module import \\ AuctionAttributesDateHoWModule from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_date_how_module import \\", "r_11=0.6, r_12=0.4, r_2=0.5) competitive_click_probability_priors, competitive_click_probability_base_classes \\ = initialize_priors(params, module_class) # Initialize competitive_clicks priors", "from files from policy2019 import Policy2019 from policy_thompson import PolicyThompsonSamplingSI # handy function", "p.act(s) # each policy responds with a bid actions.append(pol_action) print(\" Actions={}\".format(actions)) results =", "priors.loc[:, \"prior\"] = [{(): params}] * len(priors) base_classes = dates.copy() base_classes.loc[:, \"base_class\"] =", "= initialize_priors(params, module_class) # Initialize revenue priors module_class = RevenueGammaNoiseModule Params = module_class.Params", "(corresponding to 1 simulated day) T = 24 # note that this particular", ":return: simulator, state set, action set, attribute set \"\"\" from ssa_sim_v2.simulator.modules.auctions.auctions_base_module import AuctionsPoissonModule", "dates.copy() priors.loc[:, \"prior\"] = [{(): params}] * len(priors) base_classes = dates.copy() base_classes.loc[:, \"base_class\"]", "= attr_set.get_all_attr_tuples() # Initialize action set action_set = ActionSet(attr_set, max_bid=9.99, min_bid=0.01, max_mod=9.0, min_mod=0.1)", "on-the-fly # Simulator will run 24 steps (t=0,1,...,23) (corresponding to 1 simulated 
day)", "from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_date_how_module import \\ CompetitiveClicksDateHoWModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_date_how_module import \\ ConversionRateDateHoWModule from ssa_sim_v2.simulator.modules.conversions.conversions_date_how_module import", "is a tool to set up a simulator and problem definition (state set,", "= simulator_setup_1day() # build \"policies\" list that contains all bidding policies policy1 =", "tmp_df, on=[\"key\"], how=\"left\") # columns: ['date', 'hour_of_day'] dates[\"hour_of_week\"] = pd.to_datetime(dates[\"date\"]).dt.dayofweek * 24 +", "if __name__ == \"__main__\": \"\"\" This script shows how the bidding policies will", "= pd.DataFrame(pd.date_range(date_from, date_to), columns=[\"date\"]) dates_list = dates[\"date\"].tolist() dates[\"key\"] = 1 dates = pd.merge(dates,", "= initialize_priors(params, module_class) # Initialize conversion_rate priors module_class = ConversionRateFlatModule Params = module_class.Params", "note that policy in index ix gets result in index ix. The results", "will run 24 steps (t=0,1,...,23) (corresponding to 1 simulated day) T = 24", "import \\ CompetitiveClicksDateHoWModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_date_how_module import \\ ConversionRateDateHoWModule from ssa_sim_v2.simulator.modules.conversions.conversions_date_how_module import ConversionsDateHoWModule from", "set up a simulator and problem definition (state set, action set, and attribute", "module_class) # Initialize vickrey_auction priors module_class = VickreyAuctionModule Params = module_class.Params params =", "VickreyAuctionDateHoWModule(vickrey_auction_priors, vickrey_auction_base_classes, seed), \"competitive_click_probability\": CompetitiveClickProbabilityDateHoWModule( competitive_click_probability_priors, competitive_click_probability_base_classes, seed), \"competitive_clicks\": CompetitiveClicksDateHoWModule(competitive_clicks_priors, competitive_clicks_base_classes, seed), \"conversion_rate\":", "\\ {\"auctions\": AuctionsDateHoWModule(auctions_priors, auctions_base_classes, seed), \"auction_attributes\": AuctionAttributesDateHoWModule(auction_attributes_priors, auction_attributes_base_classes, seed), \"vickrey_auction\": VickreyAuctionDateHoWModule(vickrey_auction_priors, vickrey_auction_base_classes, seed),", "as pd def simulator_setup_1day(): \"\"\" This is a tool to set up a", "initialize_priors(params, base_class): attr_combinations = list(attr_set.get_all_attr_tuples()) priors = dates.copy() priors.loc[:, \"prior\"] = pd.Series([dict.fromkeys(attr_combinations, params)]", "module_class.Params params = Params(noise_level=0.0, noise_type=\"multiplicative\") competitive_clicks_priors, competitive_clicks_base_classes \\ = initialize_priors(params, module_class) # Initialize", "AuctionAttributesDateHoWModule from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_date_how_module import \\ VickreyAuctionDateHoWModule from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_date_how_module import \\ CompetitiveClickProbabilityDateHoWModule from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_date_how_module", "\"\"\" # import policy classes from files from policy2019 import Policy2019 from policy_thompson", "simulator, state_set, action_set, attr_set = simulator_setup_1day() # build \"policies\" list that contains all", "priors module_class = ConversionsBinomialModule 
Params = module_class.Params params = Params(noise_level=0.0, noise_type=\"multiplicative\") conversions_priors, conversions_base_classes", "attr_combinations = list(attr_set.get_all_attr_tuples()) priors = dates.copy() priors.loc[:, \"prior\"] = pd.Series([dict.fromkeys(attr_combinations, params)] * len(priors))", "import RevenueDateHoWModule from ssa_sim_v2.simulator.modules.competitive_cpc.competitive_cpc_date_how_module import \\ CompetitiveCpcDateHoWModule from ssa_sim_v2.simulator.competitive_date_how_simulator import CompetitiveDateHowSimulator from ssa_sim_v2.simulator.state", "p=0.5, q=0.5, r_11=0.6, r_12=0.4, r_2=0.5) competitive_click_probability_priors, competitive_click_probability_base_classes \\ = initialize_priors(params, module_class) # Initialize", "{'gender': ['M', 'F', 'U'], 'age': ['0-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-*']} attr_set", "attr_set, seed=1234) # this policy is a bare-bone sample policy that bids randomly", "competitive_click_probability priors module_class = CompetitiveClickProbabilityTwoClassGeometricModule Params = module_class.Params params = Params(n_pos=8, p=0.5, q=0.5,", "Probabilities are normalized auction_attributes_priors, auction_attributes_base_classes \\ = initialize_priors(params, module_class) # Initialize vickrey_auction priors", "import AttrSet seed = 1111 date_from = \"2018-01-01\" date_to = \"2018-01-02\" tmp_df =", "each policy will learn with result # note that policy in index ix", "auction_attributes_priors, auction_attributes_base_classes \\ = initialize_priors(params, module_class) # Initialize vickrey_auction priors module_class = VickreyAuctionModule", "module_class) # Module setup for the simulator mods = \\ {\"auctions\": AuctionsDateHoWModule(auctions_priors, auctions_base_classes,", "\"\"\" from ssa_sim_v2.simulator.modules.auctions.auctions_base_module import AuctionsPoissonModule from ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_base_module import \\ AuctionAttributesModule from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_module import", "# Initialize auction_attributes priors module_class = AuctionAttributesModule Params = module_class.Params params = Params(p=1.0)", "priors module_class = CompetitiveClickProbabilityTwoClassGeometricModule Params = module_class.Params params = Params(n_pos=8, p=0.5, q=0.5, r_11=0.6,", "to 1 simulated day) T = 24 # note that this particular setup", "module_class.Params params = Params(n_pos=8, fee=0.01) competitive_cpc_priors, competitive_cpc_base_classes = \\ initialize_priors(params, module_class) # Module", "steps (t=0,1,...,23) (corresponding to 1 simulated day) T = 24 # note that", "VickreyAuctionModule from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_base_module import \\ CompetitiveClickProbabilityTwoClassGeometricModule from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_base_module import \\ CompetitiveClicksBinomialModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_base_module", "= 1 dates = pd.merge(dates, tmp_df, on=[\"key\"], how=\"left\") # columns: ['date', 'hour_of_day'] dates[\"hour_of_week\"]", "# each policy responds with a bid actions.append(pol_action) print(\" Actions={}\".format(actions)) results = simulator.step(actions)", "another policy2019 with different seed on-the-fly # Simulator will run 24 steps (t=0,1,...,23)", "import AuctionsPoissonModule from 
ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_base_module import \\ AuctionAttributesModule from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_module import VickreyAuctionModule from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_base_module", "set action_set = ActionSet(attr_set, max_bid=9.99, min_bid=0.01, max_mod=9.0, min_mod=0.1) def initialize_priors(params, base_class): attr_combinations =", "AuctionsDateHoWModule from ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_date_how_module import \\ AuctionAttributesDateHoWModule from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_date_how_module import \\ VickreyAuctionDateHoWModule from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_date_how_module", "files from policy2019 import Policy2019 from policy_thompson import PolicyThompsonSamplingSI # handy function that", "from ssa_sim_v2.simulator.modules.competitive_cpc.competitive_cpc_date_how_module import \\ CompetitiveCpcDateHoWModule from ssa_sim_v2.simulator.competitive_date_how_simulator import CompetitiveDateHowSimulator from ssa_sim_v2.simulator.state import StateSet", "in index ix gets result in index ix. The results can be different", "module_class.Params params = Params(p=1.0) # Probabilities are normalized auction_attributes_priors, auction_attributes_base_classes \\ = initialize_priors(params,", "testing script for ORF418 Spring 2019 course \"\"\" import numpy as np import", "numpy as np import pandas as pd def simulator_setup_1day(): \"\"\" This is a", "p in enumerate(policies): p.learn(s, results[ix]) # each policy will learn with result #", "ssa_sim_v2.simulator.modules.revenue.revenue_base_module import RevenueGammaNoiseModule from ssa_sim_v2.simulator.modules.competitive_cpc.competitive_cpc_base_module import CompetitiveCPCVickreyModule from ssa_sim_v2.simulator.modules.auctions.auctions_date_how_module import AuctionsDateHoWModule from ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_date_how_module", "module_class.Params params = Params(auctions=100) priors = dates.copy() priors.loc[:, \"prior\"] = [{(): params}] *", "Initialize action set action_set = ActionSet(attr_set, max_bid=9.99, min_bid=0.01, max_mod=9.0, min_mod=0.1) def initialize_priors(params, base_class):", "= Params(n_pos=8, fee=0.01) competitive_cpc_priors, competitive_cpc_base_classes = \\ initialize_priors(params, module_class) # Module setup for", "Params(avg_rpv=300.0, noise_level=100.0) revenue_priors, revenue_base_classes = initialize_priors( params, module_class) # Initialize competitive_cpc priors module_class", "state set state_set = StateSet([\"date\", \"how\"], [\"discrete\", \"discrete\"], [dates_list, list(range(168))]) # Initialize attribute", "simulator.step(actions) for ix, p in enumerate(policies): p.learn(s, results[ix]) # each policy will learn", "RevenueGammaNoiseModule from ssa_sim_v2.simulator.modules.competitive_cpc.competitive_cpc_base_module import CompetitiveCPCVickreyModule from ssa_sim_v2.simulator.modules.auctions.auctions_date_how_module import AuctionsDateHoWModule from ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_date_how_module import \\", "from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_base_module import ConversionRateFlatModule from ssa_sim_v2.simulator.modules.conversions.conversions_base_module import ConversionsBinomialModule from 
ssa_sim_v2.simulator.modules.revenue.revenue_base_module import RevenueGammaNoiseModule from", "revenue_base_classes, seed), \"competitive_cpc\": CompetitiveCpcDateHoWModule(competitive_cpc_priors, competitive_cpc_base_classes, seed) } simulator = CompetitiveDateHowSimulator(state_set, action_set, attr_set, mods,", "'40-49', '50-59', '60-69', '70-*']} attr_set = AttrSet(names, vals) attr_combinations = attr_set.get_all_attr_tuples() # Initialize", "Params(noise_level=0.0, noise_type=\"multiplicative\") competitive_clicks_priors, competitive_clicks_base_classes \\ = initialize_priors(params, module_class) # Initialize conversion_rate priors module_class", "normalized auction_attributes_priors, auction_attributes_base_classes \\ = initialize_priors(params, module_class) # Initialize vickrey_auction priors module_class =", "limits T up to 48. T>48 will cause an error. for t in", "with a bid actions.append(pol_action) print(\" Actions={}\".format(actions)) results = simulator.step(actions) for ix, p in", "= 1 dates = pd.DataFrame(pd.date_range(date_from, date_to), columns=[\"date\"]) dates_list = dates[\"date\"].tolist() dates[\"key\"] = 1", "\"discrete\"], [dates_list, list(range(168))]) # Initialize attribute set names = ['gender', 'age'] vals =", "actions = [] for p in policies: pol_action = p.act(s) # each policy", "auction_attributes_base_classes \\ = initialize_priors(params, module_class) # Initialize vickrey_auction priors module_class = VickreyAuctionModule Params", "ConversionRateFlatModule Params = module_class.Params params = Params(cvr=0.02, noise_level=0.0, noise_type=\"multiplicative\") conversion_rate_priors, conversion_rate_base_classes \\ =", "= AuctionsPoissonModule Params = module_class.Params params = Params(auctions=100) priors = dates.copy() priors.loc[:, \"prior\"]", "\\ CompetitiveClickProbabilityTwoClassGeometricModule from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_base_module import \\ CompetitiveClicksBinomialModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_base_module import ConversionRateFlatModule from ssa_sim_v2.simulator.modules.conversions.conversions_base_module", "action_set, attr_set if __name__ == \"__main__\": \"\"\" This script shows how the bidding", "vickrey_auction priors module_class = VickreyAuctionModule Params = module_class.Params params = Params() vickrey_auction_priors, vickrey_auction_base_classes", "state set, action set, attribute set \"\"\" from ssa_sim_v2.simulator.modules.auctions.auctions_base_module import AuctionsPoissonModule from ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_base_module", "pd.DataFrame(np.array(range(24)), columns=[\"hour_of_day\"]) tmp_df[\"key\"] = 1 dates = pd.DataFrame(pd.date_range(date_from, date_to), columns=[\"date\"]) dates_list = dates[\"date\"].tolist()", "np import pandas as pd def simulator_setup_1day(): \"\"\" This is a tool to", "policy2019 import Policy2019 from policy_thompson import PolicyThompsonSamplingSI # handy function that initializes all", "q=0.5, r_11=0.6, r_12=0.4, r_2=0.5) competitive_click_probability_priors, competitive_click_probability_base_classes \\ = initialize_priors(params, module_class) # Initialize competitive_clicks", "sample policy that bids randomly without learning policy2 = PolicyThompsonSamplingSI(state_set, action_set, attr_set, seed=1234)", "convenient debugging for your policies \"\"\" # import policy classes from files from", "ConversionRateDateHoWModule(conversion_rate_priors, 
conversion_rate_base_classes, seed), \"conversions\": ConversionsDateHoWModule(conversions_priors, conversions_base_classes, seed), \"revenue\": RevenueDateHoWModule(revenue_priors, revenue_base_classes, seed), \"competitive_cpc\": CompetitiveCpcDateHoWModule(competitive_cpc_priors,", "= PolicyThompsonSamplingSI(state_set, action_set, attr_set, seed=1234) policy2.initialize({\"stp\": {\"cvr_default\": 0.02, \"rpv_default\": 300.0}}) # this policy", "48. T>48 will cause an error. for t in range(T): s = simulator.state", "Params(p=1.0) # Probabilities are normalized auction_attributes_priors, auction_attributes_base_classes \\ = initialize_priors(params, module_class) # Initialize", "CompetitiveClicksBinomialModule Params = module_class.Params params = Params(noise_level=0.0, noise_type=\"multiplicative\") competitive_clicks_priors, competitive_clicks_base_classes \\ = initialize_priors(params,", "auctions_base_classes, seed), \"auction_attributes\": AuctionAttributesDateHoWModule(auction_attributes_priors, auction_attributes_base_classes, seed), \"vickrey_auction\": VickreyAuctionDateHoWModule(vickrey_auction_priors, vickrey_auction_base_classes, seed), \"competitive_click_probability\": CompetitiveClickProbabilityDateHoWModule( competitive_click_probability_priors,", "import Policy2019 from policy_thompson import PolicyThompsonSamplingSI # handy function that initializes all for", "ConversionRateFlatModule from ssa_sim_v2.simulator.modules.conversions.conversions_base_module import ConversionsBinomialModule from ssa_sim_v2.simulator.modules.revenue.revenue_base_module import RevenueGammaNoiseModule from ssa_sim_v2.simulator.modules.competitive_cpc.competitive_cpc_base_module import CompetitiveCPCVickreyModule", "CompetitiveClicksDateHoWModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_date_how_module import \\ ConversionRateDateHoWModule from ssa_sim_v2.simulator.modules.conversions.conversions_date_how_module import ConversionsDateHoWModule from ssa_sim_v2.simulator.modules.revenue.revenue_date_how_module import", "for easier understanding and convenient debugging for your policies \"\"\" # import policy", "= 24 # note that this particular setup limits T up to 48.", "ssa_sim_v2.simulator.modules.auctions.auctions_date_how_module import AuctionsDateHoWModule from ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_date_how_module import \\ AuctionAttributesDateHoWModule from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_date_how_module import \\ VickreyAuctionDateHoWModule", "each policy responds with a bid actions.append(pol_action) print(\" Actions={}\".format(actions)) results = simulator.step(actions) for", "p in policies: pol_action = p.act(s) # each policy responds with a bid", "ssa_sim_v2.simulator.modules.revenue.revenue_date_how_module import RevenueDateHoWModule from ssa_sim_v2.simulator.modules.competitive_cpc.competitive_cpc_date_how_module import \\ CompetitiveCpcDateHoWModule from ssa_sim_v2.simulator.competitive_date_how_simulator import CompetitiveDateHowSimulator from", "\"hour_of_week\"]] # Initialize state set state_set = StateSet([\"date\", \"how\"], [\"discrete\", \"discrete\"], [dates_list, list(range(168))])", "'U'], 'age': ['0-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-*']} attr_set = AttrSet(names, vals)", "vickrey_auction_base_classes, seed), \"competitive_click_probability\": CompetitiveClickProbabilityDateHoWModule( competitive_click_probability_priors, competitive_click_probability_base_classes, 
seed), \"competitive_clicks\": CompetitiveClicksDateHoWModule(competitive_clicks_priors, competitive_clicks_base_classes, seed), \"conversion_rate\": ConversionRateDateHoWModule(conversion_rate_priors,", "contains all bidding policies policy1 = Policy2019(state_set, action_set, attr_set, seed=1234) # this policy", "= pd.DataFrame(np.array(range(24)), columns=[\"hour_of_day\"]) tmp_df[\"key\"] = 1 dates = pd.DataFrame(pd.date_range(date_from, date_to), columns=[\"date\"]) dates_list =", "import \\ CompetitiveCpcDateHoWModule from ssa_sim_v2.simulator.competitive_date_how_simulator import CompetitiveDateHowSimulator from ssa_sim_v2.simulator.state import StateSet from ssa_sim_v2.simulator.action", "\\ = initialize_priors(params, module_class) # Initialize competitive_clicks priors module_class = CompetitiveClicksBinomialModule Params =", "base_class return priors, base_classes # Initialize auctions priors module_class = AuctionsPoissonModule Params =", "Params = module_class.Params params = Params(n_pos=8, fee=0.01) competitive_cpc_priors, competitive_cpc_base_classes = \\ initialize_priors(params, module_class)", "ssa_sim_v2.simulator.attribute import AttrSet seed = 1111 date_from = \"2018-01-01\" date_to = \"2018-01-02\" tmp_df", "tmp_df[\"key\"] = 1 dates = pd.DataFrame(pd.date_range(date_from, date_to), columns=[\"date\"]) dates_list = dates[\"date\"].tolist() dates[\"key\"] =", "names = ['gender', 'age'] vals = {'gender': ['M', 'F', 'U'], 'age': ['0-19', '20-29',", "from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_base_module import \\ CompetitiveClickProbabilityTwoClassGeometricModule from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_base_module import \\ CompetitiveClicksBinomialModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_base_module import", "simulator mods = \\ {\"auctions\": AuctionsDateHoWModule(auctions_priors, auctions_base_classes, seed), \"auction_attributes\": AuctionAttributesDateHoWModule(auction_attributes_priors, auction_attributes_base_classes, seed), \"vickrey_auction\":", "revenue_priors, revenue_base_classes = initialize_priors( params, module_class) # Initialize competitive_cpc priors module_class = CompetitiveCPCVickreyModule", "step policies = [] policies.append(policy1) policies.append(policy2) policies.append(Policy2019(state_set, action_set, attr_set, seed=9292)) # adding another", "= [] policies.append(policy1) policies.append(policy2) policies.append(Policy2019(state_set, action_set, attr_set, seed=9292)) # adding another policy2019 with", "action_set, attr_set, seed=9292)) # adding another policy2019 with different seed on-the-fly # Simulator", "script for ORF418 Spring 2019 course \"\"\" import numpy as np import pandas", "StateSet([\"date\", \"how\"], [\"discrete\", \"discrete\"], [dates_list, list(range(168))]) # Initialize attribute set names = ['gender',", "from ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_base_module import \\ AuctionAttributesModule from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_module import VickreyAuctionModule from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_base_module import \\", "this particular setup limits T up to 48. 
T>48 will cause an error.", "module_class auctions_priors = priors auctions_base_classes = base_classes # Initialize auction_attributes priors module_class =", "t in range(T): s = simulator.state print(\"t={} of {}\".format(t, T)) print(\" state={}\".format(simulator.state)) actions", "vickrey_auction_base_classes \\ = initialize_priors(params, module_class) # Initialize competitive_click_probability priors module_class = CompetitiveClickProbabilityTwoClassGeometricModule Params", "bid policy testing script for ORF418 Spring 2019 course \"\"\" import numpy as", "priors module_class = CompetitiveClicksBinomialModule Params = module_class.Params params = Params(noise_level=0.0, noise_type=\"multiplicative\") competitive_clicks_priors, competitive_clicks_base_classes", "bidding policies will interact with the simulator The codes are written out for", "base_classes.loc[:, \"base_class\"] = base_class return priors, base_classes # Initialize auctions priors module_class =", "conversion_rate_priors, conversion_rate_base_classes \\ = initialize_priors(params, module_class) # Initialize conversions priors module_class = ConversionsBinomialModule", "VickreyAuctionDateHoWModule from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_date_how_module import \\ CompetitiveClickProbabilityDateHoWModule from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_date_how_module import \\ CompetitiveClicksDateHoWModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_date_how_module", "dates_list = dates[\"date\"].tolist() dates[\"key\"] = 1 dates = pd.merge(dates, tmp_df, on=[\"key\"], how=\"left\") #", "action_set, attr_set, mods, date_from, date_to, income_share=1.0) return simulator, state_set, action_set, attr_set if __name__", "action set action_set = ActionSet(attr_set, max_bid=9.99, min_bid=0.01, max_mod=9.0, min_mod=0.1) def initialize_priors(params, base_class): attr_combinations", "results = simulator.step(actions) for ix, p in enumerate(policies): p.learn(s, results[ix]) # each policy", "= AuctionAttributesModule Params = module_class.Params params = Params(p=1.0) # Probabilities are normalized auction_attributes_priors,", "action_set, attr_set, seed=1234) policy2.initialize({\"stp\": {\"cvr_default\": 0.02, \"rpv_default\": 300.0}}) # this policy is one", "policy will learn with result # note that policy in index ix gets", "out for easier understanding and convenient debugging for your policies \"\"\" # import", "dates[\"hour_of_week\"] = pd.to_datetime(dates[\"date\"]).dt.dayofweek * 24 + dates[\"hour_of_day\"] dates[\"date\"] = dates[\"date\"].dt.strftime(\"%Y-%m-%d\") dates = dates[[\"date\",", "# Simulator will run 24 steps (t=0,1,...,23) (corresponding to 1 simulated day) T", "Initialize competitive_click_probability priors module_class = CompetitiveClickProbabilityTwoClassGeometricModule Params = module_class.Params params = Params(n_pos=8, p=0.5,", "state_set, action_set, attr_set = simulator_setup_1day() # build \"policies\" list that contains all bidding", "module_class = CompetitiveClicksBinomialModule Params = module_class.Params params = Params(noise_level=0.0, noise_type=\"multiplicative\") competitive_clicks_priors, competitive_clicks_base_classes \\", "the simulator The codes are written out for easier understanding and convenient debugging", "ConversionRateDateHoWModule from ssa_sim_v2.simulator.modules.conversions.conversions_date_how_module import ConversionsDateHoWModule from 
    # Initialize state set
    state_set = StateSet(["date", "how"], ["discrete", "discrete"], [dates_list, list(range(168))])

    # Initialize attribute set
    names = ['gender', 'age']
    vals = {'gender': ['M', 'F', 'U'],
            'age': ['0-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-*']}
    attr_set = AttrSet(names, vals)
    attr_combinations = attr_set.get_all_attr_tuples()

    # Initialize action set
    action_set = ActionSet(attr_set, max_bid=9.99, min_bid=0.01, max_mod=9.0, min_mod=0.1)
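    # Illustrative note (added; an assumption about get_all_attr_tuples): if it returns the
    # full cross product of attribute values, attr_combinations holds
    # 3 genders x 7 age bands = 21 (gender, age) tuples, e.g. ('M', '0-19').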
    def initialize_priors(params, base_class):
        attr_combinations = list(attr_set.get_all_attr_tuples())
        priors = dates.copy()
        priors.loc[:, "prior"] = pd.Series([dict.fromkeys(attr_combinations, params)] * len(priors))
        base_classes = dates.copy()
        base_classes.loc[:, "base_class"] = base_class
        return priors, base_classes
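    # Added comment (interpretation of the helper above): every (date, hour_of_week) row gets
    # the same prior -- a dict mapping each attribute tuple to `params` -- plus a matching
    # row naming the module class to instantiate for that hour.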
    # Initialize auctions priors
    module_class = AuctionsPoissonModule
    Params = module_class.Params
    params = Params(auctions=100)
    priors = dates.copy()
    priors.loc[:, "prior"] = [{(): params}] * len(priors)
    base_classes = dates.copy()
    base_classes.loc[:, "base_class"] = module_class
    auctions_priors = priors
    auctions_base_classes = base_classes

    # Initialize auction_attributes priors
    module_class = AuctionAttributesModule
    Params = module_class.Params
    params = Params(p=1.0)  # Probabilities are normalized
    auction_attributes_priors, auction_attributes_base_classes \
        = initialize_priors(params, module_class)

    # Initialize vickrey_auction priors
    module_class = VickreyAuctionModule
    Params = module_class.Params
    params = Params()
    vickrey_auction_priors, vickrey_auction_base_classes \
        = initialize_priors(params, module_class)

    # Initialize competitive_click_probability priors
    module_class = CompetitiveClickProbabilityTwoClassGeometricModule
    Params = module_class.Params
    params = Params(n_pos=8, p=0.5, q=0.5, r_11=0.6, r_12=0.4, r_2=0.5)
    competitive_click_probability_priors, competitive_click_probability_base_classes \
        = initialize_priors(params, module_class)

    # Initialize competitive_clicks priors
    module_class = CompetitiveClicksBinomialModule
    Params = module_class.Params
    params = Params(noise_level=0.0, noise_type="multiplicative")
    competitive_clicks_priors, competitive_clicks_base_classes \
        = initialize_priors(params, module_class)

    # Initialize conversion_rate priors
    module_class = ConversionRateFlatModule
    Params = module_class.Params
    params = Params(cvr=0.02, noise_level=0.0, noise_type="multiplicative")
    conversion_rate_priors, conversion_rate_base_classes \
        = initialize_priors(params, module_class)

    # Initialize conversions priors
    module_class = ConversionsBinomialModule
    Params = module_class.Params
    params = Params(noise_level=0.0, noise_type="multiplicative")
    conversions_priors, conversions_base_classes \
        = initialize_priors(params, module_class)

    # Initialize revenue priors
    module_class = RevenueGammaNoiseModule
    Params = module_class.Params
    params = Params(avg_rpv=300.0, noise_level=100.0)
    revenue_priors, revenue_base_classes = initialize_priors(params, module_class)

    # Initialize competitive_cpc priors
    module_class = CompetitiveCPCVickreyModule
    Params = module_class.Params
    params = Params(n_pos=8, fee=0.01)
    competitive_cpc_priors, competitive_cpc_base_classes = \
        initialize_priors(params, module_class)

    # Module setup for the simulator
    mods = \
        {"auctions": AuctionsDateHoWModule(auctions_priors, auctions_base_classes, seed),
         "auction_attributes": AuctionAttributesDateHoWModule(auction_attributes_priors,
                                                              auction_attributes_base_classes, seed),
         "vickrey_auction": VickreyAuctionDateHoWModule(vickrey_auction_priors, vickrey_auction_base_classes, seed),
         "competitive_click_probability": CompetitiveClickProbabilityDateHoWModule(
             competitive_click_probability_priors, competitive_click_probability_base_classes, seed),
         "competitive_clicks": CompetitiveClicksDateHoWModule(competitive_clicks_priors,
                                                              competitive_clicks_base_classes, seed),
         "conversion_rate": ConversionRateDateHoWModule(conversion_rate_priors, conversion_rate_base_classes, seed),
         "conversions": ConversionsDateHoWModule(conversions_priors, conversions_base_classes, seed),
         "revenue": RevenueDateHoWModule(revenue_priors, revenue_base_classes, seed),
         "competitive_cpc": CompetitiveCpcDateHoWModule(competitive_cpc_priors, competitive_cpc_base_classes, seed)}
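    # Added note (assumption about the simulator's interface): income_share=1.0 below is read
    # as crediting the full revenue of each conversion to the bidder; the original script does
    # not spell this out, so treat it as an interpretation rather than fact.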
    simulator = CompetitiveDateHowSimulator(state_set, action_set, attr_set, mods,
                                            date_from, date_to, income_share=1.0)

    return simulator, state_set, action_set, attr_set


if __name__ == "__main__":
    """
    This script shows how the bidding policies interact with the simulator.
    The code is written out step by step for easier understanding and convenient
    debugging of your policies.
    """

    # Import policy classes from files
    from policy2019 import Policy2019
    from policy_thompson import PolicyThompsonSamplingSI

    # Handy function that initializes everything for you
    simulator, state_set, action_set, attr_set = simulator_setup_1day()

    # Build a "policies" list that contains all bidding policies
    policy1 = Policy2019(state_set, action_set, attr_set, seed=1234)
    # policy1 is a bare-bone sample policy that bids randomly without learning
    policy2 = PolicyThompsonSamplingSI(state_set, action_set, attr_set, seed=1234)
    policy2.initialize({"stp": {"cvr_default": 0.02, "rpv_default": 300.0}})
    # policy2 is one of the production-level policies that needs this extra initialization step

    policies = []
    policies.append(policy1)
    policies.append(policy2)
    policies.append(Policy2019(state_set, action_set, attr_set, seed=9292))  # adding another Policy2019 with a different seed on the fly
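    # Added sketch (not part of the original script): any object exposing act(state) and
    # learn(state, result) can be appended to `policies`. The wrapper below is an illustrative
    # assumption that delegates bidding to an existing policy while recording what it observed;
    # the exact structure of `result` depends on the simulator and is not assumed here.
    class LoggingPolicyWrapper(object):
        def __init__(self, inner_policy):
            self.inner = inner_policy
            self.history = []  # list of [state, action, result] triples

        def act(self, state):
            action = self.inner.act(state)
            self.history.append([state, action, None])
            return action

        def learn(self, state, result):
            if self.history:
                self.history[-1][2] = result  # attach the result to the latest step
            self.inner.learn(state, result)

    # Example usage (hypothetical): uncomment to track a fourth policy's trajectory.
    # policies.append(LoggingPolicyWrapper(Policy2019(state_set, action_set, attr_set, seed=777)))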
    # Simulator will run 24 steps (t=0,1,...,23), corresponding to 1 simulated day
    T = 24  # note that this particular setup limits T up to 48; T>48 will cause an error
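    # Added explanation (inferred from the setup above, so treat as an assumption): the limit
    # of 48 comes from the two-day date grid built in simulator_setup_1day() (2018-01-01 to
    # 2018-01-02 gives 48 hourly states); extending date_to there should raise the usable horizon.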
    for t in range(T):
        s = simulator.state
        print("t={} of {}".format(t, T))
        print(" state={}".format(simulator.state))
        actions = []
        for p in policies:
            pol_action = p.act(s)  # each policy responds with a bid
            actions.append(pol_action)
        print(" Actions={}".format(actions))
        results = simulator.step(actions)
        for ix, p in enumerate(policies):
            p.learn(s, results[ix])  # each policy learns from its own result
            # note that the policy at index ix gets the result at index ix. The results can be
note that policy", "2019 course \"\"\" import numpy as np import pandas as pd def simulator_setup_1day():", "for ix, p in enumerate(policies): p.learn(s, results[ix]) # each policy will learn with", "base_classes # Initialize auction_attributes priors module_class = AuctionAttributesModule Params = module_class.Params params =", "policies that needs this extra step policies = [] policies.append(policy1) policies.append(policy2) policies.append(Policy2019(state_set, action_set,", "auctions priors module_class = AuctionsPoissonModule Params = module_class.Params params = Params(auctions=100) priors =", "policy is one of production level policies that needs this extra step policies", "bare-bone sample policy that bids randomly without learning policy2 = PolicyThompsonSamplingSI(state_set, action_set, attr_set,", "module_class.Params params = Params(n_pos=8, p=0.5, q=0.5, r_11=0.6, r_12=0.4, r_2=0.5) competitive_click_probability_priors, competitive_click_probability_base_classes \\ =", "responds with a bid actions.append(pol_action) print(\" Actions={}\".format(actions)) results = simulator.step(actions) for ix, p", "Params = module_class.Params params = Params(noise_level=0.0, noise_type=\"multiplicative\") conversions_priors, conversions_base_classes \\ = initialize_priors(params, module_class)", "pd def simulator_setup_1day(): \"\"\" This is a tool to set up a simulator", "(t=0,1,...,23) (corresponding to 1 simulated day) T = 24 # note that this", "set, action set, and attribute set) :return: simulator, state set, action set, attribute", "ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_base_module import ConversionRateFlatModule from ssa_sim_v2.simulator.modules.conversions.conversions_base_module import ConversionsBinomialModule from ssa_sim_v2.simulator.modules.revenue.revenue_base_module import RevenueGammaNoiseModule from ssa_sim_v2.simulator.modules.competitive_cpc.competitive_cpc_base_module", "max_mod=9.0, min_mod=0.1) def initialize_priors(params, base_class): attr_combinations = list(attr_set.get_all_attr_tuples()) priors = dates.copy() priors.loc[:, \"prior\"]", "simulator.state print(\"t={} of {}\".format(t, T)) print(\" state={}\".format(simulator.state)) actions = [] for p in", "ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_date_how_module import \\ AuctionAttributesDateHoWModule from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_date_how_module import \\ VickreyAuctionDateHoWModule from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_date_how_module import \\", "= initialize_priors(params, module_class) # Initialize competitive_click_probability priors module_class = CompetitiveClickProbabilityTwoClassGeometricModule Params = module_class.Params", "how the bidding policies will interact with the simulator The codes are written", "build \"policies\" list that contains all bidding policies policy1 = Policy2019(state_set, action_set, attr_set,", "pandas as pd def simulator_setup_1day(): \"\"\" This is a tool to set up", "ConversionsBinomialModule from ssa_sim_v2.simulator.modules.revenue.revenue_base_module import RevenueGammaNoiseModule from ssa_sim_v2.simulator.modules.competitive_cpc.competitive_cpc_base_module import CompetitiveCPCVickreyModule from ssa_sim_v2.simulator.modules.auctions.auctions_date_how_module import AuctionsDateHoWModule", "module_class = ConversionsBinomialModule Params = module_class.Params params = Params(noise_level=0.0, noise_type=\"multiplicative\") 
conversions_priors, conversions_base_classes \\", "ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_date_how_module import \\ CompetitiveClickProbabilityDateHoWModule from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_date_how_module import \\ CompetitiveClicksDateHoWModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_date_how_module import \\", "state_set = StateSet([\"date\", \"how\"], [\"discrete\", \"discrete\"], [dates_list, list(range(168))]) # Initialize attribute set names", "module_class = CompetitiveClickProbabilityTwoClassGeometricModule Params = module_class.Params params = Params(n_pos=8, p=0.5, q=0.5, r_11=0.6, r_12=0.4,", "module_class.Params params = Params(avg_rpv=300.0, noise_level=100.0) revenue_priors, revenue_base_classes = initialize_priors( params, module_class) # Initialize", "\\ initialize_priors(params, module_class) # Module setup for the simulator mods = \\ {\"auctions\":", "= priors auctions_base_classes = base_classes # Initialize auction_attributes priors module_class = AuctionAttributesModule Params", "Initialize auction_attributes priors module_class = AuctionAttributesModule Params = module_class.Params params = Params(p=1.0) #", "VickreyAuctionModule Params = module_class.Params params = Params() vickrey_auction_priors, vickrey_auction_base_classes \\ = initialize_priors(params, module_class)", "set, attribute set \"\"\" from ssa_sim_v2.simulator.modules.auctions.auctions_base_module import AuctionsPoissonModule from ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_base_module import \\ AuctionAttributesModule", "= ['gender', 'age'] vals = {'gender': ['M', 'F', 'U'], 'age': ['0-19', '20-29', '30-39',", "from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_base_module import \\ CompetitiveClicksBinomialModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_base_module import ConversionRateFlatModule from ssa_sim_v2.simulator.modules.conversions.conversions_base_module import ConversionsBinomialModule", "'50-59', '60-69', '70-*']} attr_set = AttrSet(names, vals) attr_combinations = attr_set.get_all_attr_tuples() # Initialize action", "24 steps (t=0,1,...,23) (corresponding to 1 simulated day) T = 24 # note", "policy testing script for ORF418 Spring 2019 course \"\"\" import numpy as np", "problem definition (state set, action set, and attribute set) :return: simulator, state set,", "\"2018-01-02\" tmp_df = pd.DataFrame(np.array(range(24)), columns=[\"hour_of_day\"]) tmp_df[\"key\"] = 1 dates = pd.DataFrame(pd.date_range(date_from, date_to), columns=[\"date\"])", "[\"discrete\", \"discrete\"], [dates_list, list(range(168))]) # Initialize attribute set names = ['gender', 'age'] vals", "and convenient debugging for your policies \"\"\" # import policy classes from files", "import \\ CompetitiveClicksBinomialModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_base_module import ConversionRateFlatModule from ssa_sim_v2.simulator.modules.conversions.conversions_base_module import ConversionsBinomialModule from ssa_sim_v2.simulator.modules.revenue.revenue_base_module", "competitive_click_probability_base_classes \\ = initialize_priors(params, module_class) # Initialize competitive_clicks priors module_class = CompetitiveClicksBinomialModule Params", "= dates[[\"date\", \"hour_of_week\"]] # Initialize state set state_set = StateSet([\"date\", \"how\"], [\"discrete\", \"discrete\"],", "= 
dates[\"date\"].tolist() dates[\"key\"] = 1 dates = pd.merge(dates, tmp_df, on=[\"key\"], how=\"left\") # columns:", "300.0}}) # this policy is one of production level policies that needs this", "initialize_priors(params, module_class) # Initialize revenue priors module_class = RevenueGammaNoiseModule Params = module_class.Params params", "pol_action = p.act(s) # each policy responds with a bid actions.append(pol_action) print(\" Actions={}\".format(actions))", "{\"cvr_default\": 0.02, \"rpv_default\": 300.0}}) # this policy is one of production level policies", "* len(priors)) base_classes = dates.copy() base_classes.loc[:, \"base_class\"] = base_class return priors, base_classes #", "competitive_cpc priors module_class = CompetitiveCPCVickreyModule Params = module_class.Params params = Params(n_pos=8, fee=0.01) competitive_cpc_priors,", "ORF418 Spring 2019 course \"\"\" import numpy as np import pandas as pd", "24 # note that this particular setup limits T up to 48. T>48", "# Module setup for the simulator mods = \\ {\"auctions\": AuctionsDateHoWModule(auctions_priors, auctions_base_classes, seed),", "'hour_of_day'] dates[\"hour_of_week\"] = pd.to_datetime(dates[\"date\"]).dt.dayofweek * 24 + dates[\"hour_of_day\"] dates[\"date\"] = dates[\"date\"].dt.strftime(\"%Y-%m-%d\") dates =", "from policy2019 import Policy2019 from policy_thompson import PolicyThompsonSamplingSI # handy function that initializes", "params = Params(auctions=100) priors = dates.copy() priors.loc[:, \"prior\"] = [{(): params}] * len(priors)", "\"competitive_click_probability\": CompetitiveClickProbabilityDateHoWModule( competitive_click_probability_priors, competitive_click_probability_base_classes, seed), \"competitive_clicks\": CompetitiveClicksDateHoWModule(competitive_clicks_priors, competitive_clicks_base_classes, seed), \"conversion_rate\": ConversionRateDateHoWModule(conversion_rate_priors, conversion_rate_base_classes, seed),", "noise_type=\"multiplicative\") competitive_clicks_priors, competitive_clicks_base_classes \\ = initialize_priors(params, module_class) # Initialize conversion_rate priors module_class =", "that contains all bidding policies policy1 = Policy2019(state_set, action_set, attr_set, seed=1234) # this", "= VickreyAuctionModule Params = module_class.Params params = Params() vickrey_auction_priors, vickrey_auction_base_classes \\ = initialize_priors(params,", "from ssa_sim_v2.simulator.modules.auctions.auctions_base_module import AuctionsPoissonModule from ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_base_module import \\ AuctionAttributesModule from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_module import VickreyAuctionModule", "= Params(auctions=100) priors = dates.copy() priors.loc[:, \"prior\"] = [{(): params}] * len(priors) base_classes", "\\ = initialize_priors(params, module_class) # Initialize conversion_rate priors module_class = ConversionRateFlatModule Params =", "Initialize competitive_cpc priors module_class = CompetitiveCPCVickreyModule Params = module_class.Params params = Params(n_pos=8, fee=0.01)", "income_share=1.0) return simulator, state_set, action_set, attr_set if __name__ == \"__main__\": \"\"\" This script", "policy responds with a bid actions.append(pol_action) print(\" Actions={}\".format(actions)) results = simulator.step(actions) for ix,", "* 24 + dates[\"hour_of_day\"] dates[\"date\"] = dates[\"date\"].dt.strftime(\"%Y-%m-%d\") dates = dates[[\"date\", \"hour_of_week\"]] # Initialize", "= 
pd.Series([dict.fromkeys(attr_combinations, params)] * len(priors)) base_classes = dates.copy() base_classes.loc[:, \"base_class\"] = base_class return", "\"prior\"] = pd.Series([dict.fromkeys(attr_combinations, params)] * len(priors)) base_classes = dates.copy() base_classes.loc[:, \"base_class\"] = base_class", "conversion_rate priors module_class = ConversionRateFlatModule Params = module_class.Params params = Params(cvr=0.02, noise_level=0.0, noise_type=\"multiplicative\")", "Initialize state set state_set = StateSet([\"date\", \"how\"], [\"discrete\", \"discrete\"], [dates_list, list(range(168))]) # Initialize", "= pd.to_datetime(dates[\"date\"]).dt.dayofweek * 24 + dates[\"hour_of_day\"] dates[\"date\"] = dates[\"date\"].dt.strftime(\"%Y-%m-%d\") dates = dates[[\"date\", \"hour_of_week\"]]", "params}] * len(priors) base_classes = dates.copy() base_classes.loc[:, \"base_class\"] = module_class auctions_priors = priors", "module_class) # Initialize revenue priors module_class = RevenueGammaNoiseModule Params = module_class.Params params =", "= pd.merge(dates, tmp_df, on=[\"key\"], how=\"left\") # columns: ['date', 'hour_of_day'] dates[\"hour_of_week\"] = pd.to_datetime(dates[\"date\"]).dt.dayofweek *", "noise_type=\"multiplicative\") conversion_rate_priors, conversion_rate_base_classes \\ = initialize_priors(params, module_class) # Initialize conversions priors module_class =", "import \\ CompetitiveClickProbabilityDateHoWModule from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_date_how_module import \\ CompetitiveClicksDateHoWModule from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_date_how_module import \\ ConversionRateDateHoWModule", "ssa_sim_v2.simulator.modules.competitive_cpc.competitive_cpc_date_how_module import \\ CompetitiveCpcDateHoWModule from ssa_sim_v2.simulator.competitive_date_how_simulator import CompetitiveDateHowSimulator from ssa_sim_v2.simulator.state import StateSet from", "initialize_priors(params, module_class) # Module setup for the simulator mods = \\ {\"auctions\": AuctionsDateHoWModule(auctions_priors,", "= simulator.state print(\"t={} of {}\".format(t, T)) print(\" state={}\".format(simulator.state)) actions = [] for p", "== \"__main__\": \"\"\" This script shows how the bidding policies will interact with" ]
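The driver loop above pins down the small interface a bidding policy has to expose: a constructor taking (state_set, action_set, attr_set, seed=...), an act(state) method that returns the bid action for the current state, and a learn(state, result) method that receives that policy's own entry from simulator.step. The shell below is only an illustrative sketch of that contract; the class name MyPolicy and the idea of delegating to the random Policy2019 baseline are assumptions, not part of the course code.

# Illustrative sketch only: a minimal policy shell that satisfies the act/learn
# interface used by the loop above by delegating to the Policy2019 baseline.
# "MyPolicy" is a made-up name; replace the delegation with your own logic.
from policy2019 import Policy2019


class MyPolicy:
    def __init__(self, state_set, action_set, attr_set, seed=1):
        self._baseline = Policy2019(state_set, action_set, attr_set, seed=seed)

    def act(self, state):
        # For now, bid exactly like the random baseline.
        return self._baseline.act(state)

    def learn(self, state, result):
        # The baseline does not learn; a real policy would update its model here.
        self._baseline.learn(state, result)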
[ "2)) / K) pbar.update(end - start) pbar.close() return entropies_N def compute_entropy(probs_N_K_C: torch.Tensor) ->", "List import torch import math from tqdm.auto import tqdm from toma import toma", "from typing import List import torch import math from tqdm.auto import tqdm from", "N) candidate_indices = [] candidate_scores = [] if batch_size == 0: return CandidateBatch(candidate_scores,", "K, C, dtype=dtype, device=device) # We always keep these on the CPU. scores_N", "desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int, end: int): nats_n_K_C =", "> 0: latest_index = candidate_indices[-1] batch_joint_entropy.add_variables( probs_N_K_C[latest_index:latest_index + 1]) shared_conditinal_entropies = conditional_entropies_N[ candidate_indices].sum()", "def get_batchbald_batch(probs_N_K_C: torch.Tensor, batch_size: int, num_samples: int, dtype=None, device=None) -> CandidateBatch: N, K,", "- math.log(K) nats_n_C = mean_logits_n_C * torch.exp(mean_logits_n_C) entropies_N[start:end].copy_( -torch.sum(nats_n_C, dim=1)) pbar.update(end - start)", "return CandidateBatch(candidate_scores, candidate_indices) # Cell def get_bald_batch(probs_N_K_C: torch.Tensor, batch_size: int, dtype=None, device=None) ->", "* torch.log(mean_probs_n_C) nats_n_C[mean_probs_n_C ==0] = 0. entropies_N[start:end].copy_(-torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close() return", "1024) def compute(probs_n_K_C, start: int, end: int): mean_probs_n_C = probs_n_K_C.mean(dim=1) nats_n_C = mean_probs_n_C", "def compute_conditional_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = probs_N_K_C.shape entropies_N = torch.empty(N,", "int, end: int): mean_logits_n_C = torch.logsumexp(logits_n_K_C, dim=1) - math.log(K) nats_n_C = mean_logits_n_C *", "- start) pbar.close() return entropies_N def compute_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C", "0: return CandidateBatch(candidate_scores, candidate_indices) conditional_entropies_N = compute_conditional_entropy(probs_N_K_C) batch_joint_entropy = joint_entropy.DynamicJointEntropy(num_samples, batch_size - 1,", "torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int,", "end: int): nats_n_K_C = probs_n_K_C * torch.log(probs_n_K_C) nats_n_K_C[probs_n_K_C ==0] = 0. 
entropies_N[start:end].copy_(-torch.sum(nats_n_K_C, dim=(1,", "C = probs_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False)", "batch_joint_entropy.add_variables( probs_N_K_C[latest_index:latest_index + 1]) shared_conditinal_entropies = conditional_entropies_N[ candidate_indices].sum() batch_joint_entropy.compute_batch(probs_N_K_C, output_entropies_B=scores_N) scores_N -= conditional_entropies_N", "end: int): mean_probs_n_C = probs_n_K_C.mean(dim=1) nats_n_C = mean_probs_n_C * torch.log(mean_probs_n_C) nats_n_C[mean_probs_n_C ==0] =", "N) candidate_indices = [] candidate_scores = [] scores_N = -compute_conditional_entropy(probs_N_K_C) scores_N += compute_entropy(probs_N_K_C)", "nats_n_C = mean_logits_n_C * torch.exp(mean_logits_n_C) entropies_N[start:end].copy_( -torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close() return", "- start) pbar.close() return entropies_N # Internal Cell # Not publishing these at", "entropies_N def compute_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = logits_N_K_C.shape entropies_N =", "typing import List import torch import math from tqdm.auto import tqdm from toma", "[] scores_N = -compute_conditional_entropy(probs_N_K_C) scores_N += compute_entropy(probs_N_K_C) candiate_scores, candidate_indices = torch.topk(scores_N, batch_size) return", "scores_N.max(dim=0) candidate_indices.append(candidate_index.item()) candidate_scores.append(candidate_score.item()) return CandidateBatch(candidate_scores, candidate_indices) # Cell def get_bald_batch(probs_N_K_C: torch.Tensor, batch_size: int,", "publishing these at the moment. def compute_conditional_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C", "from toma import toma from batchbald_redux import joint_entropy # Cell def compute_conditional_entropy(probs_N_K_C: torch.Tensor)", "[] candidate_scores = [] if batch_size == 0: return CandidateBatch(candidate_scores, candidate_indices) conditional_entropies_N =", "List[int] def get_batchbald_batch(probs_N_K_C: torch.Tensor, batch_size: int, num_samples: int, dtype=None, device=None) -> CandidateBatch: N,", "dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int, end:", "==0] = 0. entropies_N[start:end].copy_(-torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end - start) pbar.close() return", "dim=1)) pbar.update(end - start) pbar.close() return entropies_N # Cell @dataclass class CandidateBatch: scores:", "K, C = probs_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\",", "= ['compute_conditional_entropy', 'compute_entropy', 'CandidateBatch', 'get_batchbald_batch', 'get_bald_batch'] # Cell from dataclasses import dataclass from", "desc=\"Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int, end: int): mean_probs_n_C = probs_n_K_C.mean(dim=1)", "batch_joint_entropy = joint_entropy.DynamicJointEntropy(num_samples, batch_size - 1, K, C, dtype=dtype, device=device) # We always", "= probs_n_K_C * torch.log(probs_n_K_C) nats_n_K_C[probs_n_K_C ==0] = 0. 
entropies_N[start:end].copy_(-torch.sum(nats_n_K_C, dim=(1, 2)) / K)", "leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int, end: int): mean_probs_n_C = probs_n_K_C.mean(dim=1) nats_n_C", "/ K) pbar.update(end - start) pbar.close() return entropies_N def compute_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor:", "0. entropies_N[start:end].copy_(-torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close() return entropies_N # Internal Cell #", "= -float('inf') candidate_score, candidate_index = scores_N.max(dim=0) candidate_indices.append(candidate_index.item()) candidate_scores.append(candidate_score.item()) return CandidateBatch(candidate_scores, candidate_indices) # Cell", "min(batch_size, N) candidate_indices = [] candidate_scores = [] if batch_size == 0: return", "tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int, end: int): mean_logits_n_C =", "= compute_conditional_entropy(probs_N_K_C) batch_joint_entropy = joint_entropy.DynamicJointEntropy(num_samples, batch_size - 1, K, C, dtype=dtype, device=device) #", "scores_N[candidate_indices] = -float('inf') candidate_score, candidate_index = scores_N.max(dim=0) candidate_indices.append(candidate_index.item()) candidate_scores.append(candidate_score.item()) return CandidateBatch(candidate_scores, candidate_indices) #", "compute_conditional_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = logits_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double)", "compute(logits_n_K_C, start: int, end: int): mean_logits_n_C = torch.logsumexp(logits_n_K_C, dim=1) - math.log(K) nats_n_C =", "probs_N_K_C.shape batch_size = min(batch_size, N) candidate_indices = [] candidate_scores = [] scores_N =", "# Cell def get_bald_batch(probs_N_K_C: torch.Tensor, batch_size: int, dtype=None, device=None) -> CandidateBatch: N, K,", "= probs_n_K_C.mean(dim=1) nats_n_C = mean_probs_n_C * torch.log(mean_probs_n_C) nats_n_C[mean_probs_n_C ==0] = 0. entropies_N[start:end].copy_(-torch.sum(nats_n_C, dim=1))", "probs_N_K_C.shape batch_size = min(batch_size, N) candidate_indices = [] candidate_scores = [] if batch_size", "start: int, end: int): mean_probs_n_C = probs_n_K_C.mean(dim=1) nats_n_C = mean_probs_n_C * torch.log(mean_probs_n_C) nats_n_C[mean_probs_n_C", "= probs_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024)", "'compute_entropy', 'CandidateBatch', 'get_batchbald_batch', 'get_bald_batch'] # Cell from dataclasses import dataclass from typing import", "= torch.logsumexp(logits_n_K_C, dim=1) - math.log(K) nats_n_C = mean_logits_n_C * torch.exp(mean_logits_n_C) entropies_N[start:end].copy_( -torch.sum(nats_n_C, dim=1))", "mean_probs_n_C * torch.log(mean_probs_n_C) nats_n_C[mean_probs_n_C ==0] = 0. 
entropies_N[start:end].copy_(-torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close()", "= candidate_indices[-1] batch_joint_entropy.add_variables( probs_N_K_C[latest_index:latest_index + 1]) shared_conditinal_entropies = conditional_entropies_N[ candidate_indices].sum() batch_joint_entropy.compute_batch(probs_N_K_C, output_entropies_B=scores_N) scores_N", "candidate_indices.append(candidate_index.item()) candidate_scores.append(candidate_score.item()) return CandidateBatch(candidate_scores, candidate_indices) # Cell def get_bald_batch(probs_N_K_C: torch.Tensor, batch_size: int, dtype=None,", "batch_size = min(batch_size, N) candidate_indices = [] candidate_scores = [] if batch_size ==", "candidate_indices = [] candidate_scores = [] if batch_size == 0: return CandidateBatch(candidate_scores, candidate_indices)", "Cell from dataclasses import dataclass from typing import List import torch import math", "* torch.log(probs_n_K_C) nats_n_K_C[probs_n_K_C ==0] = 0. entropies_N[start:end].copy_(-torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end -", "desc=\"BatchBALD\", leave=False): if i > 0: latest_index = candidate_indices[-1] batch_joint_entropy.add_variables( probs_N_K_C[latest_index:latest_index + 1])", "entropies_N[start:end].copy_(-torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close() return entropies_N # Internal Cell # Not", "joint_entropy.DynamicJointEntropy(num_samples, batch_size - 1, K, C, dtype=dtype, device=device) # We always keep these", "def compute(logits_n_K_C, start: int, end: int): nats_n_K_C = logits_n_K_C * torch.exp(logits_n_K_C) entropies_N[start:end].copy_( -torch.sum(nats_n_K_C,", "dataclasses import dataclass from typing import List import torch import math from tqdm.auto", "from dataclasses import dataclass from typing import List import torch import math from", "the moment. 
def compute_conditional_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = logits_N_K_C.shape entropies_N", "def compute_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = logits_N_K_C.shape entropies_N = torch.empty(N,", "-> CandidateBatch: N, K, C = probs_N_K_C.shape batch_size = min(batch_size, N) candidate_indices =", "= torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start:", "if batch_size == 0: return CandidateBatch(candidate_scores, candidate_indices) conditional_entropies_N = compute_conditional_entropy(probs_N_K_C) batch_joint_entropy = joint_entropy.DynamicJointEntropy(num_samples,", "tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int, end: int): nats_n_K_C", "'get_batchbald_batch', 'get_bald_batch'] # Cell from dataclasses import dataclass from typing import List import", "compute_conditional_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = probs_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double)", "probs_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def", "import dataclass from typing import List import torch import math from tqdm.auto import", "__all__ = ['compute_conditional_entropy', 'compute_entropy', 'CandidateBatch', 'get_batchbald_batch', 'get_bald_batch'] # Cell from dataclasses import dataclass", "[] if batch_size == 0: return CandidateBatch(candidate_scores, candidate_indices) conditional_entropies_N = compute_conditional_entropy(probs_N_K_C) batch_joint_entropy =", "always keep these on the CPU. scores_N = torch.empty(N, dtype=torch.double, pin_memory=torch.cuda.is_available()) for i", "tqdm.auto import tqdm from toma import toma from batchbald_redux import joint_entropy # Cell", "int, dtype=None, device=None) -> CandidateBatch: N, K, C = probs_N_K_C.shape batch_size = min(batch_size,", "device=None) -> CandidateBatch: N, K, C = probs_N_K_C.shape batch_size = min(batch_size, N) candidate_indices", "pbar.update(end - start) pbar.close() return entropies_N # Internal Cell # Not publishing these", "batch_size - 1, K, C, dtype=dtype, device=device) # We always keep these on", "scores: List[float] indices: List[int] def get_batchbald_batch(probs_N_K_C: torch.Tensor, batch_size: int, num_samples: int, dtype=None, device=None)", "shared_conditinal_entropies scores_N[candidate_indices] = -float('inf') candidate_score, candidate_index = scores_N.max(dim=0) candidate_indices.append(candidate_index.item()) candidate_scores.append(candidate_score.item()) return CandidateBatch(candidate_scores, candidate_indices)", "CandidateBatch(candidate_scores, candidate_indices) conditional_entropies_N = compute_conditional_entropy(probs_N_K_C) batch_joint_entropy = joint_entropy.DynamicJointEntropy(num_samples, batch_size - 1, K, C,", "to edit: 01_batchbald.ipynb (unless otherwise specified). 
__all__ = ['compute_conditional_entropy', 'compute_entropy', 'CandidateBatch', 'get_batchbald_batch', 'get_bald_batch']", "N, K, C = probs_N_K_C.shape batch_size = min(batch_size, N) candidate_indices = [] candidate_scores", "- 1, K, C, dtype=dtype, device=device) # We always keep these on the", "int): nats_n_K_C = logits_n_K_C * torch.exp(logits_n_K_C) entropies_N[start:end].copy_( -torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end", "torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start:", "leave=False): if i > 0: latest_index = candidate_indices[-1] batch_joint_entropy.add_variables( probs_N_K_C[latest_index:latest_index + 1]) shared_conditinal_entropies", "dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int, end:", "K) pbar.update(end - start) pbar.close() return entropies_N def compute_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N,", "compute_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = logits_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double)", "entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C,", "import joint_entropy # Cell def compute_conditional_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C =", "these at the moment. def compute_conditional_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C =", "List[float] indices: List[int] def get_batchbald_batch(probs_N_K_C: torch.Tensor, batch_size: int, num_samples: int, dtype=None, device=None) ->", "= probs_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C,", "pbar.update(end - start) pbar.close() return entropies_N def compute_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K,", "int): mean_logits_n_C = torch.logsumexp(logits_n_K_C, dim=1) - math.log(K) nats_n_C = mean_logits_n_C * torch.exp(mean_logits_n_C) entropies_N[start:end].copy_(", "= tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int, end: int): mean_logits_n_C", "'get_bald_batch'] # Cell from dataclasses import dataclass from typing import List import torch", "# Cell @dataclass class CandidateBatch: scores: List[float] indices: List[int] def get_batchbald_batch(probs_N_K_C: torch.Tensor, batch_size:", "logits_n_K_C * torch.exp(logits_n_K_C) entropies_N[start:end].copy_( -torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end - start) pbar.close()", "AUTOGENERATED! DO NOT EDIT! File to edit: 01_batchbald.ipynb (unless otherwise specified). __all__ =", "otherwise specified). 
__all__ = ['compute_conditional_entropy', 'compute_entropy', 'CandidateBatch', 'get_batchbald_batch', 'get_bald_batch'] # Cell from dataclasses", "torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int,", "Cell def compute_conditional_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = probs_N_K_C.shape entropies_N =", "torch.Tensor: N, K, C = probs_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N,", "= [] if batch_size == 0: return CandidateBatch(candidate_scores, candidate_indices) conditional_entropies_N = compute_conditional_entropy(probs_N_K_C) batch_joint_entropy", "torch.log(probs_n_K_C) nats_n_K_C[probs_n_K_C ==0] = 0. entropies_N[start:end].copy_(-torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end - start)", "start) pbar.close() return entropies_N def compute_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C =", "Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int, end: int): nats_n_K_C = probs_n_K_C", "= torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start:", "= probs_N_K_C.shape batch_size = min(batch_size, N) candidate_indices = [] candidate_scores = [] if", "start) pbar.close() return entropies_N # Internal Cell # Not publishing these at the", "batch_size = min(batch_size, N) candidate_indices = [] candidate_scores = [] scores_N = -compute_conditional_entropy(probs_N_K_C)", "import torch import math from tqdm.auto import tqdm from toma import toma from", "C, dtype=dtype, device=device) # We always keep these on the CPU. scores_N =", "keep these on the CPU. scores_N = torch.empty(N, dtype=torch.double, pin_memory=torch.cuda.is_available()) for i in", "pbar.close() return entropies_N def compute_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = probs_N_K_C.shape", "== 0: return CandidateBatch(candidate_scores, candidate_indices) conditional_entropies_N = compute_conditional_entropy(probs_N_K_C) batch_joint_entropy = joint_entropy.DynamicJointEntropy(num_samples, batch_size -", "-= conditional_entropies_N + shared_conditinal_entropies scores_N[candidate_indices] = -float('inf') candidate_score, candidate_index = scores_N.max(dim=0) candidate_indices.append(candidate_index.item()) candidate_scores.append(candidate_score.item())", "at the moment. def compute_conditional_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = logits_N_K_C.shape", "= [] candidate_scores = [] scores_N = -compute_conditional_entropy(probs_N_K_C) scores_N += compute_entropy(probs_N_K_C) candiate_scores, candidate_indices", "Internal Cell # Not publishing these at the moment. def compute_conditional_entropy_from_logits(logits_N_K_C: torch.Tensor) ->", "desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int, end: int): nats_n_K_C =", "File to edit: 01_batchbald.ipynb (unless otherwise specified). 
__all__ = ['compute_conditional_entropy', 'compute_entropy', 'CandidateBatch', 'get_batchbald_batch',", "conditional_entropies_N + shared_conditinal_entropies scores_N[candidate_indices] = -float('inf') candidate_score, candidate_index = scores_N.max(dim=0) candidate_indices.append(candidate_index.item()) candidate_scores.append(candidate_score.item()) return", "edit: 01_batchbald.ipynb (unless otherwise specified). __all__ = ['compute_conditional_entropy', 'compute_entropy', 'CandidateBatch', 'get_batchbald_batch', 'get_bald_batch'] #", "N, K, C = probs_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\",", "start: int, end: int): nats_n_K_C = probs_n_K_C * torch.log(probs_n_K_C) nats_n_K_C[probs_n_K_C ==0] = 0.", "+ shared_conditinal_entropies scores_N[candidate_indices] = -float('inf') candidate_score, candidate_index = scores_N.max(dim=0) candidate_indices.append(candidate_index.item()) candidate_scores.append(candidate_score.item()) return CandidateBatch(candidate_scores,", "Not publishing these at the moment. def compute_conditional_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K,", "@toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int, end: int): mean_logits_n_C = torch.logsumexp(logits_n_K_C, dim=1) -", "return entropies_N # Internal Cell # Not publishing these at the moment. def", "mean_logits_n_C = torch.logsumexp(logits_n_K_C, dim=1) - math.log(K) nats_n_C = mean_logits_n_C * torch.exp(mean_logits_n_C) entropies_N[start:end].copy_( -torch.sum(nats_n_C,", "= mean_logits_n_C * torch.exp(mean_logits_n_C) entropies_N[start:end].copy_( -torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close() return entropies_N", "-float('inf') candidate_score, candidate_index = scores_N.max(dim=0) candidate_indices.append(candidate_index.item()) candidate_scores.append(candidate_score.item()) return CandidateBatch(candidate_scores, candidate_indices) # Cell def", "= joint_entropy.DynamicJointEntropy(num_samples, batch_size - 1, K, C, dtype=dtype, device=device) # We always keep", "candidate_indices[-1] batch_joint_entropy.add_variables( probs_N_K_C[latest_index:latest_index + 1]) shared_conditinal_entropies = conditional_entropies_N[ candidate_indices].sum() batch_joint_entropy.compute_batch(probs_N_K_C, output_entropies_B=scores_N) scores_N -=", "return CandidateBatch(candidate_scores, candidate_indices) conditional_entropies_N = compute_conditional_entropy(probs_N_K_C) batch_joint_entropy = joint_entropy.DynamicJointEntropy(num_samples, batch_size - 1, K,", "= tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int, end: int):", "i in tqdm(range(batch_size), desc=\"BatchBALD\", leave=False): if i > 0: latest_index = candidate_indices[-1] batch_joint_entropy.add_variables(", "1024) def compute(probs_n_K_C, start: int, end: int): nats_n_K_C = probs_n_K_C * torch.log(probs_n_K_C) nats_n_K_C[probs_n_K_C", "for i in tqdm(range(batch_size), desc=\"BatchBALD\", leave=False): if i > 0: latest_index = candidate_indices[-1]", "start: int, end: int): nats_n_K_C = logits_n_K_C * torch.exp(logits_n_K_C) entropies_N[start:end].copy_( -torch.sum(nats_n_K_C, dim=(1, 2))", "dim=(1, 2)) / K) pbar.update(end - start) pbar.close() return entropies_N def compute_entropy(probs_N_K_C: torch.Tensor)", "pbar.update(end - start) pbar.close() return entropies_N def compute_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K,", "1024) def 
compute(logits_n_K_C, start: int, end: int): nats_n_K_C = logits_n_K_C * torch.exp(logits_n_K_C) entropies_N[start:end].copy_(", "torch.Tensor, batch_size: int, dtype=None, device=None) -> CandidateBatch: N, K, C = probs_N_K_C.shape batch_size", "K, C = logits_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\",", "mean_logits_n_C * torch.exp(mean_logits_n_C) entropies_N[start:end].copy_( -torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close() return entropies_N #", "specified). __all__ = ['compute_conditional_entropy', 'compute_entropy', 'CandidateBatch', 'get_batchbald_batch', 'get_bald_batch'] # Cell from dataclasses import", "= conditional_entropies_N[ candidate_indices].sum() batch_joint_entropy.compute_batch(probs_N_K_C, output_entropies_B=scores_N) scores_N -= conditional_entropies_N + shared_conditinal_entropies scores_N[candidate_indices] = -float('inf')", "dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int,", "[] candidate_scores = [] scores_N = -compute_conditional_entropy(probs_N_K_C) scores_N += compute_entropy(probs_N_K_C) candiate_scores, candidate_indices =", "nats_n_C = mean_probs_n_C * torch.log(mean_probs_n_C) nats_n_C[mean_probs_n_C ==0] = 0. entropies_N[start:end].copy_(-torch.sum(nats_n_C, dim=1)) pbar.update(end -", "torch.logsumexp(logits_n_K_C, dim=1) - math.log(K) nats_n_C = mean_logits_n_C * torch.exp(mean_logits_n_C) entropies_N[start:end].copy_( -torch.sum(nats_n_C, dim=1)) pbar.update(end", "logits_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024)", "mean_probs_n_C = probs_n_K_C.mean(dim=1) nats_n_C = mean_probs_n_C * torch.log(mean_probs_n_C) nats_n_C[mean_probs_n_C ==0] = 0. entropies_N[start:end].copy_(-torch.sum(nats_n_C,", "# AUTOGENERATED! DO NOT EDIT! File to edit: 01_batchbald.ipynb (unless otherwise specified). __all__", "N, K, C = logits_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional", "start: int, end: int): mean_logits_n_C = torch.logsumexp(logits_n_K_C, dim=1) - math.log(K) nats_n_C = mean_logits_n_C", "-torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end - start) pbar.close() return entropies_N def compute_entropy_from_logits(logits_N_K_C:", "Cell # Not publishing these at the moment. 
def compute_conditional_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor:", "class CandidateBatch: scores: List[float] indices: List[int] def get_batchbald_batch(probs_N_K_C: torch.Tensor, batch_size: int, num_samples: int,", "indices: List[int] def get_batchbald_batch(probs_N_K_C: torch.Tensor, batch_size: int, num_samples: int, dtype=None, device=None) -> CandidateBatch:", "pbar.close() return entropies_N def compute_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = logits_N_K_C.shape", "N, K, C = logits_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\",", "= probs_N_K_C.shape batch_size = min(batch_size, N) candidate_indices = [] candidate_scores = [] scores_N", "C = probs_N_K_C.shape batch_size = min(batch_size, N) candidate_indices = [] candidate_scores = []", "num_samples: int, dtype=None, device=None) -> CandidateBatch: N, K, C = probs_N_K_C.shape batch_size =", "math.log(K) nats_n_C = mean_logits_n_C * torch.exp(mean_logits_n_C) entropies_N[start:end].copy_( -torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close()", "Cell @dataclass class CandidateBatch: scores: List[float] indices: List[int] def get_batchbald_batch(probs_N_K_C: torch.Tensor, batch_size: int,", "torch.exp(logits_n_K_C) entropies_N[start:end].copy_( -torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end - start) pbar.close() return entropies_N", "min(batch_size, N) candidate_indices = [] candidate_scores = [] scores_N = -compute_conditional_entropy(probs_N_K_C) scores_N +=", "return entropies_N def compute_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = probs_N_K_C.shape entropies_N", "tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int, end: int): mean_probs_n_C =", "nats_n_K_C[probs_n_K_C ==0] = 0. entropies_N[start:end].copy_(-torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end - start) pbar.close()", "N, K, C = probs_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional", "= min(batch_size, N) candidate_indices = [] candidate_scores = [] scores_N = -compute_conditional_entropy(probs_N_K_C) scores_N", "candidate_scores = [] if batch_size == 0: return CandidateBatch(candidate_scores, candidate_indices) conditional_entropies_N = compute_conditional_entropy(probs_N_K_C)", "latest_index = candidate_indices[-1] batch_joint_entropy.add_variables( probs_N_K_C[latest_index:latest_index + 1]) shared_conditinal_entropies = conditional_entropies_N[ candidate_indices].sum() batch_joint_entropy.compute_batch(probs_N_K_C, output_entropies_B=scores_N)", "K, C = logits_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False)", "scores_N = torch.empty(N, dtype=torch.double, pin_memory=torch.cuda.is_available()) for i in tqdm(range(batch_size), desc=\"BatchBALD\", leave=False): if i", "'CandidateBatch', 'get_batchbald_batch', 'get_bald_batch'] # Cell from dataclasses import dataclass from typing import List", "= 0. 
entropies_N[start:end].copy_(-torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end - start) pbar.close() return entropies_N", "= torch.empty(N, dtype=torch.double, pin_memory=torch.cuda.is_available()) for i in tqdm(range(batch_size), desc=\"BatchBALD\", leave=False): if i >", "return entropies_N def compute_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = logits_N_K_C.shape entropies_N", "pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int, end: int):", "['compute_conditional_entropy', 'compute_entropy', 'CandidateBatch', 'get_batchbald_batch', 'get_bald_batch'] # Cell from dataclasses import dataclass from typing", "shared_conditinal_entropies = conditional_entropies_N[ candidate_indices].sum() batch_joint_entropy.compute_batch(probs_N_K_C, output_entropies_B=scores_N) scores_N -= conditional_entropies_N + shared_conditinal_entropies scores_N[candidate_indices] =", "# We always keep these on the CPU. scores_N = torch.empty(N, dtype=torch.double, pin_memory=torch.cuda.is_available())", "pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int, end: int):", "scores_N = -compute_conditional_entropy(probs_N_K_C) scores_N += compute_entropy(probs_N_K_C) candiate_scores, candidate_indices = torch.topk(scores_N, batch_size) return CandidateBatch(candiate_scores.tolist(),", "C = logits_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C,", "these on the CPU. scores_N = torch.empty(N, dtype=torch.double, pin_memory=torch.cuda.is_available()) for i in tqdm(range(batch_size),", "entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def", "entropies_N[start:end].copy_( -torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end - start) pbar.close() return entropies_N def", "entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C,", "from tqdm.auto import tqdm from toma import toma from batchbald_redux import joint_entropy #", "probs_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024)", "CandidateBatch: scores: List[float] indices: List[int] def get_batchbald_batch(probs_N_K_C: torch.Tensor, batch_size: int, num_samples: int, dtype=None,", "tqdm(range(batch_size), desc=\"BatchBALD\", leave=False): if i > 0: latest_index = candidate_indices[-1] batch_joint_entropy.add_variables( probs_N_K_C[latest_index:latest_index +", "on the CPU. scores_N = torch.empty(N, dtype=torch.double, pin_memory=torch.cuda.is_available()) for i in tqdm(range(batch_size), desc=\"BatchBALD\",", "compute(probs_n_K_C, start: int, end: int): mean_probs_n_C = probs_n_K_C.mean(dim=1) nats_n_C = mean_probs_n_C * torch.log(mean_probs_n_C)", "= torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C,", "= mean_probs_n_C * torch.log(mean_probs_n_C) nats_n_C[mean_probs_n_C ==0] = 0. 
entropies_N[start:end].copy_(-torch.sum(nats_n_C, dim=1)) pbar.update(end - start)", "torch.Tensor) -> torch.Tensor: N, K, C = logits_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar", "0: latest_index = candidate_indices[-1] batch_joint_entropy.add_variables( probs_N_K_C[latest_index:latest_index + 1]) shared_conditinal_entropies = conditional_entropies_N[ candidate_indices].sum() batch_joint_entropy.compute_batch(probs_N_K_C,", "math from tqdm.auto import tqdm from toma import toma from batchbald_redux import joint_entropy", "dtype=dtype, device=device) # We always keep these on the CPU. scores_N = torch.empty(N,", "compute_conditional_entropy(probs_N_K_C) batch_joint_entropy = joint_entropy.DynamicJointEntropy(num_samples, batch_size - 1, K, C, dtype=dtype, device=device) # We", "batch_size: int, num_samples: int, dtype=None, device=None) -> CandidateBatch: N, K, C = probs_N_K_C.shape", "dim=(1, 2)) / K) pbar.update(end - start) pbar.close() return entropies_N def compute_entropy_from_logits(logits_N_K_C: torch.Tensor)", "pin_memory=torch.cuda.is_available()) for i in tqdm(range(batch_size), desc=\"BatchBALD\", leave=False): if i > 0: latest_index =", "Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int, end: int): nats_n_K_C = logits_n_K_C", "* torch.exp(mean_logits_n_C) entropies_N[start:end].copy_( -torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close() return entropies_N # Cell", "dtype=None, device=None) -> CandidateBatch: N, K, C = probs_N_K_C.shape batch_size = min(batch_size, N)", "if i > 0: latest_index = candidate_indices[-1] batch_joint_entropy.add_variables( probs_N_K_C[latest_index:latest_index + 1]) shared_conditinal_entropies =", "in tqdm(range(batch_size), desc=\"BatchBALD\", leave=False): if i > 0: latest_index = candidate_indices[-1] batch_joint_entropy.add_variables( probs_N_K_C[latest_index:latest_index", "candidate_scores = [] scores_N = -compute_conditional_entropy(probs_N_K_C) scores_N += compute_entropy(probs_N_K_C) candiate_scores, candidate_indices = torch.topk(scores_N,", "scores_N -= conditional_entropies_N + shared_conditinal_entropies scores_N[candidate_indices] = -float('inf') candidate_score, candidate_index = scores_N.max(dim=0) candidate_indices.append(candidate_index.item())", "candidate_score, candidate_index = scores_N.max(dim=0) candidate_indices.append(candidate_index.item()) candidate_scores.append(candidate_score.item()) return CandidateBatch(candidate_scores, candidate_indices) # Cell def get_bald_batch(probs_N_K_C:", "probs_n_K_C.mean(dim=1) nats_n_C = mean_probs_n_C * torch.log(mean_probs_n_C) nats_n_C[mean_probs_n_C ==0] = 0. entropies_N[start:end].copy_(-torch.sum(nats_n_C, dim=1)) pbar.update(end", "# Internal Cell # Not publishing these at the moment. 
def compute_conditional_entropy_from_logits(logits_N_K_C: torch.Tensor)", "import toma from batchbald_redux import joint_entropy # Cell def compute_conditional_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor:", "entropies_N[start:end].copy_( -torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close() return entropies_N # Cell @dataclass class", "import math from tqdm.auto import tqdm from toma import toma from batchbald_redux import", "dim=1)) pbar.update(end - start) pbar.close() return entropies_N # Internal Cell # Not publishing", "conditional_entropies_N[ candidate_indices].sum() batch_joint_entropy.compute_batch(probs_N_K_C, output_entropies_B=scores_N) scores_N -= conditional_entropies_N + shared_conditinal_entropies scores_N[candidate_indices] = -float('inf') candidate_score,", "int, num_samples: int, dtype=None, device=None) -> CandidateBatch: N, K, C = probs_N_K_C.shape batch_size", "joint_entropy # Cell def compute_conditional_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = probs_N_K_C.shape", "batch_size == 0: return CandidateBatch(candidate_scores, candidate_indices) conditional_entropies_N = compute_conditional_entropy(probs_N_K_C) batch_joint_entropy = joint_entropy.DynamicJointEntropy(num_samples, batch_size", "pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int, end:", "dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int,", "@dataclass class CandidateBatch: scores: List[float] indices: List[int] def get_batchbald_batch(probs_N_K_C: torch.Tensor, batch_size: int, num_samples:", "compute(probs_n_K_C, start: int, end: int): nats_n_K_C = probs_n_K_C * torch.log(probs_n_K_C) nats_n_K_C[probs_n_K_C ==0] =", "get_bald_batch(probs_N_K_C: torch.Tensor, batch_size: int, dtype=None, device=None) -> CandidateBatch: N, K, C = probs_N_K_C.shape", "tqdm from toma import toma from batchbald_redux import joint_entropy # Cell def compute_conditional_entropy(probs_N_K_C:", "- start) pbar.close() return entropies_N # Cell @dataclass class CandidateBatch: scores: List[float] indices:", "torch.Tensor) -> torch.Tensor: N, K, C = probs_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar", "C = probs_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C,", "def get_bald_batch(probs_N_K_C: torch.Tensor, batch_size: int, dtype=None, device=None) -> CandidateBatch: N, K, C =", "1]) shared_conditinal_entropies = conditional_entropies_N[ candidate_indices].sum() batch_joint_entropy.compute_batch(probs_N_K_C, output_entropies_B=scores_N) scores_N -= conditional_entropies_N + shared_conditinal_entropies scores_N[candidate_indices]", "= logits_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C,", "candidate_indices) conditional_entropies_N = compute_conditional_entropy(probs_N_K_C) batch_joint_entropy = joint_entropy.DynamicJointEntropy(num_samples, batch_size - 1, K, C, dtype=dtype,", "-> torch.Tensor: N, K, C = probs_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar =", "device=device) # We always keep these on the CPU. 
scores_N = torch.empty(N, dtype=torch.double,", "pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int, end:", "# Cell def compute_conditional_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = probs_N_K_C.shape entropies_N", "def compute(logits_n_K_C, start: int, end: int): mean_logits_n_C = torch.logsumexp(logits_n_K_C, dim=1) - math.log(K) nats_n_C", "the CPU. scores_N = torch.empty(N, dtype=torch.double, pin_memory=torch.cuda.is_available()) for i in tqdm(range(batch_size), desc=\"BatchBALD\", leave=False):", "i > 0: latest_index = candidate_indices[-1] batch_joint_entropy.add_variables( probs_N_K_C[latest_index:latest_index + 1]) shared_conditinal_entropies = conditional_entropies_N[", "@toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int, end: int): nats_n_K_C = probs_n_K_C * torch.log(probs_n_K_C)", "torch.Tensor: N, K, C = logits_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N,", "start) pbar.close() return entropies_N def compute_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C =", "= [] candidate_scores = [] if batch_size == 0: return CandidateBatch(candidate_scores, candidate_indices) conditional_entropies_N", "DO NOT EDIT! File to edit: 01_batchbald.ipynb (unless otherwise specified). __all__ = ['compute_conditional_entropy',", "K) pbar.update(end - start) pbar.close() return entropies_N def compute_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N,", "torch.log(mean_probs_n_C) nats_n_C[mean_probs_n_C ==0] = 0. entropies_N[start:end].copy_(-torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close() return entropies_N", "nats_n_K_C = logits_n_K_C * torch.exp(logits_n_K_C) entropies_N[start:end].copy_( -torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end -", "batch_size: int, dtype=None, device=None) -> CandidateBatch: N, K, C = probs_N_K_C.shape batch_size =", "nats_n_C[mean_probs_n_C ==0] = 0. entropies_N[start:end].copy_(-torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close() return entropies_N #", "CPU. 
scores_N = torch.empty(N, dtype=torch.double, pin_memory=torch.cuda.is_available()) for i in tqdm(range(batch_size), desc=\"BatchBALD\", leave=False): if", "1024) def compute(logits_n_K_C, start: int, end: int): mean_logits_n_C = torch.logsumexp(logits_n_K_C, dim=1) - math.log(K)", "K, C = probs_N_K_C.shape batch_size = min(batch_size, N) candidate_indices = [] candidate_scores =", "entropies_N[start:end].copy_(-torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end - start) pbar.close() return entropies_N def compute_entropy(probs_N_K_C:", "= torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C,", "2)) / K) pbar.update(end - start) pbar.close() return entropies_N def compute_entropy_from_logits(logits_N_K_C: torch.Tensor) ->", "torch import math from tqdm.auto import tqdm from toma import toma from batchbald_redux", "compute_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = probs_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double)", "torch.exp(mean_logits_n_C) entropies_N[start:end].copy_( -torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close() return entropies_N # Cell @dataclass", "# Cell from dataclasses import dataclass from typing import List import torch import", "CandidateBatch(candidate_scores, candidate_indices) # Cell def get_bald_batch(probs_N_K_C: torch.Tensor, batch_size: int, dtype=None, device=None) -> CandidateBatch:", "compute(logits_n_K_C, start: int, end: int): nats_n_K_C = logits_n_K_C * torch.exp(logits_n_K_C) entropies_N[start:end].copy_( -torch.sum(nats_n_K_C, dim=(1,", "return entropies_N # Cell @dataclass class CandidateBatch: scores: List[float] indices: List[int] def get_batchbald_batch(probs_N_K_C:", "logits_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def", "def compute(probs_n_K_C, start: int, end: int): nats_n_K_C = probs_n_K_C * torch.log(probs_n_K_C) nats_n_K_C[probs_n_K_C ==0]", "- start) pbar.close() return entropies_N def compute_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C", "# Not publishing these at the moment. def compute_conditional_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N,", "torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start:", "(unless otherwise specified). __all__ = ['compute_conditional_entropy', 'compute_entropy', 'CandidateBatch', 'get_batchbald_batch', 'get_bald_batch'] # Cell from", "probs_n_K_C * torch.log(probs_n_K_C) nats_n_K_C[probs_n_K_C ==0] = 0. 
entropies_N[start:end].copy_(-torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end", "from batchbald_redux import joint_entropy # Cell def compute_conditional_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K,", "@toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int, end: int): nats_n_K_C = logits_n_K_C * torch.exp(logits_n_K_C)", "conditional_entropies_N = compute_conditional_entropy(probs_N_K_C) batch_joint_entropy = joint_entropy.DynamicJointEntropy(num_samples, batch_size - 1, K, C, dtype=dtype, device=device)", "= logits_n_K_C * torch.exp(logits_n_K_C) entropies_N[start:end].copy_( -torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end - start)", "torch.empty(N, dtype=torch.double, pin_memory=torch.cuda.is_available()) for i in tqdm(range(batch_size), desc=\"BatchBALD\", leave=False): if i > 0:", "import List import torch import math from tqdm.auto import tqdm from toma import", "probs_N_K_C[latest_index:latest_index + 1]) shared_conditinal_entropies = conditional_entropies_N[ candidate_indices].sum() batch_joint_entropy.compute_batch(probs_N_K_C, output_entropies_B=scores_N) scores_N -= conditional_entropies_N +", "CandidateBatch: N, K, C = probs_N_K_C.shape batch_size = min(batch_size, N) candidate_indices = []", "leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int, end: int): nats_n_K_C = logits_n_K_C *", "0. entropies_N[start:end].copy_(-torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end - start) pbar.close() return entropies_N def", "entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def", "Cell def get_bald_batch(probs_N_K_C: torch.Tensor, batch_size: int, dtype=None, device=None) -> CandidateBatch: N, K, C", "+ 1]) shared_conditinal_entropies = conditional_entropies_N[ candidate_indices].sum() batch_joint_entropy.compute_batch(probs_N_K_C, output_entropies_B=scores_N) scores_N -= conditional_entropies_N + shared_conditinal_entropies", "/ K) pbar.update(end - start) pbar.close() return entropies_N def compute_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor:", "import tqdm from toma import toma from batchbald_redux import joint_entropy # Cell def", "pbar.update(end - start) pbar.close() return entropies_N # Cell @dataclass class CandidateBatch: scores: List[float]", "dim=1) - math.log(K) nats_n_C = mean_logits_n_C * torch.exp(mean_logits_n_C) entropies_N[start:end].copy_( -torch.sum(nats_n_C, dim=1)) pbar.update(end -", "get_batchbald_batch(probs_N_K_C: torch.Tensor, batch_size: int, num_samples: int, dtype=None, device=None) -> CandidateBatch: N, K, C", "leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int, end: int): nats_n_K_C = probs_n_K_C *", "= scores_N.max(dim=0) candidate_indices.append(candidate_index.item()) candidate_scores.append(candidate_score.item()) return CandidateBatch(candidate_scores, candidate_indices) # Cell def get_bald_batch(probs_N_K_C: torch.Tensor, batch_size:", "K, C = probs_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False)", "We always keep these on the CPU. 
scores_N = torch.empty(N, dtype=torch.double, pin_memory=torch.cuda.is_available()) for", "int): mean_probs_n_C = probs_n_K_C.mean(dim=1) nats_n_C = mean_probs_n_C * torch.log(mean_probs_n_C) nats_n_C[mean_probs_n_C ==0] = 0.", "end: int): nats_n_K_C = logits_n_K_C * torch.exp(logits_n_K_C) entropies_N[start:end].copy_( -torch.sum(nats_n_K_C, dim=(1, 2)) / K)", "-torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close() return entropies_N # Cell @dataclass class CandidateBatch:", "def compute(probs_n_K_C, start: int, end: int): mean_probs_n_C = probs_n_K_C.mean(dim=1) nats_n_C = mean_probs_n_C *", "= [] scores_N = -compute_conditional_entropy(probs_N_K_C) scores_N += compute_entropy(probs_N_K_C) candiate_scores, candidate_indices = torch.topk(scores_N, batch_size)", "entropies_N def compute_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = probs_N_K_C.shape entropies_N =", "torch.Tensor, batch_size: int, num_samples: int, dtype=None, device=None) -> CandidateBatch: N, K, C =", "= logits_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024)", "candidate_indices) # Cell def get_bald_batch(probs_N_K_C: torch.Tensor, batch_size: int, dtype=None, device=None) -> CandidateBatch: N,", "nats_n_K_C = probs_n_K_C * torch.log(probs_n_K_C) nats_n_K_C[probs_n_K_C ==0] = 0. entropies_N[start:end].copy_(-torch.sum(nats_n_K_C, dim=(1, 2)) /", "dataclass from typing import List import torch import math from tqdm.auto import tqdm", "def compute_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = probs_N_K_C.shape entropies_N = torch.empty(N,", "end: int): mean_logits_n_C = torch.logsumexp(logits_n_K_C, dim=1) - math.log(K) nats_n_C = mean_logits_n_C * torch.exp(mean_logits_n_C)", "toma from batchbald_redux import joint_entropy # Cell def compute_conditional_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N,", "= min(batch_size, N) candidate_indices = [] candidate_scores = [] if batch_size == 0:", "batch_joint_entropy.compute_batch(probs_N_K_C, output_entropies_B=scores_N) scores_N -= conditional_entropies_N + shared_conditinal_entropies scores_N[candidate_indices] = -float('inf') candidate_score, candidate_index =", "@toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int, end: int): mean_probs_n_C = probs_n_K_C.mean(dim=1) nats_n_C =", "entropies_N # Cell @dataclass class CandidateBatch: scores: List[float] indices: List[int] def get_batchbald_batch(probs_N_K_C: torch.Tensor,", "==0] = 0. entropies_N[start:end].copy_(-torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close() return entropies_N # Internal", "01_batchbald.ipynb (unless otherwise specified). 
__all__ = ['compute_conditional_entropy', 'compute_entropy', 'CandidateBatch', 'get_batchbald_batch', 'get_bald_batch'] # Cell", "= -compute_conditional_entropy(probs_N_K_C) scores_N += compute_entropy(probs_N_K_C) candiate_scores, candidate_indices = torch.topk(scores_N, batch_size) return CandidateBatch(candiate_scores.tolist(), candidate_indices.tolist())", "start) pbar.close() return entropies_N # Cell @dataclass class CandidateBatch: scores: List[float] indices: List[int]", "output_entropies_B=scores_N) scores_N -= conditional_entropies_N + shared_conditinal_entropies scores_N[candidate_indices] = -float('inf') candidate_score, candidate_index = scores_N.max(dim=0)", "= tqdm(total=N, desc=\"Entropy\", leave=False) @toma.execute.chunked(probs_N_K_C, 1024) def compute(probs_n_K_C, start: int, end: int): mean_probs_n_C", "desc=\"Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int, end: int): mean_logits_n_C = torch.logsumexp(logits_n_K_C,", "int, end: int): nats_n_K_C = logits_n_K_C * torch.exp(logits_n_K_C) entropies_N[start:end].copy_( -torch.sum(nats_n_K_C, dim=(1, 2)) /", "int, end: int): mean_probs_n_C = probs_n_K_C.mean(dim=1) nats_n_C = mean_probs_n_C * torch.log(mean_probs_n_C) nats_n_C[mean_probs_n_C ==0]", "batchbald_redux import joint_entropy # Cell def compute_conditional_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C", "C = logits_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar = tqdm(total=N, desc=\"Conditional Entropy\", leave=False)", "dtype=torch.double, pin_memory=torch.cuda.is_available()) for i in tqdm(range(batch_size), desc=\"BatchBALD\", leave=False): if i > 0: latest_index", "candidate_indices].sum() batch_joint_entropy.compute_batch(probs_N_K_C, output_entropies_B=scores_N) scores_N -= conditional_entropies_N + shared_conditinal_entropies scores_N[candidate_indices] = -float('inf') candidate_score, candidate_index", "NOT EDIT! File to edit: 01_batchbald.ipynb (unless otherwise specified). __all__ = ['compute_conditional_entropy', 'compute_entropy',", "toma import toma from batchbald_redux import joint_entropy # Cell def compute_conditional_entropy(probs_N_K_C: torch.Tensor) ->", "int, end: int): nats_n_K_C = probs_n_K_C * torch.log(probs_n_K_C) nats_n_K_C[probs_n_K_C ==0] = 0. entropies_N[start:end].copy_(-torch.sum(nats_n_K_C,", "tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int, end: int): nats_n_K_C", "def compute_conditional_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = logits_N_K_C.shape entropies_N = torch.empty(N,", "candidate_indices = [] candidate_scores = [] scores_N = -compute_conditional_entropy(probs_N_K_C) scores_N += compute_entropy(probs_N_K_C) candiate_scores,", "= tqdm(total=N, desc=\"Conditional Entropy\", leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int, end: int):", "moment. def compute_conditional_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor: N, K, C = logits_N_K_C.shape entropies_N =", "leave=False) @toma.execute.chunked(logits_N_K_C, 1024) def compute(logits_n_K_C, start: int, end: int): mean_logits_n_C = torch.logsumexp(logits_n_K_C, dim=1)", "1, K, C, dtype=dtype, device=device) # We always keep these on the CPU.", "-> torch.Tensor: N, K, C = logits_N_K_C.shape entropies_N = torch.empty(N, dtype=torch.double) pbar =", "= 0. 
entropies_N[start:end].copy_(-torch.sum(nats_n_C, dim=1)) pbar.update(end - start) pbar.close() return entropies_N # Internal Cell", "* torch.exp(logits_n_K_C) entropies_N[start:end].copy_( -torch.sum(nats_n_K_C, dim=(1, 2)) / K) pbar.update(end - start) pbar.close() return", "pbar.close() return entropies_N # Cell @dataclass class CandidateBatch: scores: List[float] indices: List[int] def", "pbar.close() return entropies_N # Internal Cell # Not publishing these at the moment.", "EDIT! File to edit: 01_batchbald.ipynb (unless otherwise specified). __all__ = ['compute_conditional_entropy', 'compute_entropy', 'CandidateBatch',", "candidate_index = scores_N.max(dim=0) candidate_indices.append(candidate_index.item()) candidate_scores.append(candidate_score.item()) return CandidateBatch(candidate_scores, candidate_indices) # Cell def get_bald_batch(probs_N_K_C: torch.Tensor,", "int): nats_n_K_C = probs_n_K_C * torch.log(probs_n_K_C) nats_n_K_C[probs_n_K_C ==0] = 0. entropies_N[start:end].copy_(-torch.sum(nats_n_K_C, dim=(1, 2))", "candidate_scores.append(candidate_score.item()) return CandidateBatch(candidate_scores, candidate_indices) # Cell def get_bald_batch(probs_N_K_C: torch.Tensor, batch_size: int, dtype=None, device=None)", "entropies_N # Internal Cell # Not publishing these at the moment. def compute_conditional_entropy_from_logits(logits_N_K_C:" ]
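A quick smoke test for the two selectors above; this is a minimal sketch, assuming the listing is importable as batchbald_redux.batchbald (as in the published package) and that its joint_entropy dependency is installed. The shapes, sample counts, and random posterior below are illustrative only, not from the original source:

import torch
from batchbald_redux.batchbald import get_bald_batch, get_batchbald_batch

# Fake Bayesian posterior samples: N pool points, K MC samples, C classes.
N, K, C = 100, 20, 10
probs_N_K_C = torch.softmax(torch.randn(N, K, C, dtype=torch.double), dim=-1)

# BALD ranks points independently; BatchBALD scores them jointly.
bald = get_bald_batch(probs_N_K_C, batch_size=5)
batchbald = get_batchbald_batch(probs_N_K_C, batch_size=5, num_samples=1000)

print("BALD picks:     ", bald.indices)
print("BatchBALD picks:", batchbald.indices)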
[ "image=\"data_more/\"+nti[nti.rfind(\"/\")+1:nti.find(\"_cut\")]+\".grd\"; cmd=\"\\ngrdinfo \"+nti+\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; info=pipe.read().strip(); pipe.close(); zmax=info[re.search(\"z_max:\\s*\",info).end(0):re.search(\"z_max:\\s*\\S*\\s*\",info).end(0)].strip(); if zmax != \"0\": print(jday+\" \"+zmax+\"", "if zmax != \"0\": print(jday+\" \"+zmax+\" \"+vdir+\" \"+image); #if zmax != \"0\" and", "\\\"*nti21_cut.grd\\\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; ntis=pipe.read().split(); pipe.close(); for nti in ntis: jday=nti[nti.find(\".A\")+6:nti.find(\".A\")+9]; vdir=nti[nti.find(\"/\")+1:nti.rfind(\"/\")]; image=\"data_more/\"+nti[nti.rfind(\"/\")+1:nti.find(\"_cut\")]+\".grd\"; cmd=\"\\ngrdinfo \"+nti+\"\\n\";", "-name \\\"*nti21_cut.grd\\\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; ntis=pipe.read().split(); pipe.close(); for nti in ntis: jday=nti[nti.find(\".A\")+6:nti.find(\".A\")+9]; vdir=nti[nti.find(\"/\")+1:nti.rfind(\"/\")]; image=\"data_more/\"+nti[nti.rfind(\"/\")+1:nti.find(\"_cut\")]+\".grd\"; cmd=\"\\ngrdinfo", "ntis: jday=nti[nti.find(\".A\")+6:nti.find(\".A\")+9]; vdir=nti[nti.find(\"/\")+1:nti.rfind(\"/\")]; image=\"data_more/\"+nti[nti.rfind(\"/\")+1:nti.find(\"_cut\")]+\".grd\"; cmd=\"\\ngrdinfo \"+nti+\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; info=pipe.read().strip(); pipe.close(); zmax=info[re.search(\"z_max:\\s*\",info).end(0):re.search(\"z_max:\\s*\\S*\\s*\",info).end(0)].strip(); if zmax !=", "\"0\": print(jday+\" \"+zmax+\" \"+vdir+\" \"+image); #if zmax != \"0\" and float(zmax) > -0.861:", "\"+zmax+\" \"+vdir+\" \"+image); #if zmax != \"0\" and float(zmax) > -0.861: #print(nti+\" \"+zmax);", "pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; ntis=pipe.read().split(); pipe.close(); for nti in ntis: jday=nti[nti.find(\".A\")+6:nti.find(\".A\")+9]; vdir=nti[nti.find(\"/\")+1:nti.rfind(\"/\")]; image=\"data_more/\"+nti[nti.rfind(\"/\")+1:nti.find(\"_cut\")]+\".grd\"; cmd=\"\\ngrdinfo \"+nti+\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;", "\"+nti+\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; info=pipe.read().strip(); pipe.close(); zmax=info[re.search(\"z_max:\\s*\",info).end(0):re.search(\"z_max:\\s*\\S*\\s*\",info).end(0)].strip(); if zmax != \"0\": print(jday+\" \"+zmax+\" \"+vdir+\" \"+image);", "!= \"0\": print(jday+\" \"+zmax+\" \"+vdir+\" \"+image); #if zmax != \"0\" and float(zmax) >", "in ntis: jday=nti[nti.find(\".A\")+6:nti.find(\".A\")+9]; vdir=nti[nti.find(\"/\")+1:nti.rfind(\"/\")]; image=\"data_more/\"+nti[nti.rfind(\"/\")+1:nti.find(\"_cut\")]+\".grd\"; cmd=\"\\ngrdinfo \"+nti+\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; info=pipe.read().strip(); pipe.close(); zmax=info[re.search(\"z_max:\\s*\",info).end(0):re.search(\"z_max:\\s*\\S*\\s*\",info).end(0)].strip(); if zmax", "cmd=\"\\nfind . -name \\\"*nti21_cut.grd\\\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; ntis=pipe.read().split(); pipe.close(); for nti in ntis: jday=nti[nti.find(\".A\")+6:nti.find(\".A\")+9]; vdir=nti[nti.find(\"/\")+1:nti.rfind(\"/\")];", "#/usr/bin/python import re; import subprocess; cmd=\"\\nfind . 
-name \\\"*nti21_cut.grd\\\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; ntis=pipe.read().split(); pipe.close(); for", "zmax != \"0\" and float(zmax) > -0.861: #print(nti+\" \"+zmax); exit(); \"\"\" exit(); \"\"\"", "#if zmax != \"0\" and float(zmax) > -0.861: #print(nti+\" \"+zmax); exit(); \"\"\" exit();", "info=pipe.read().strip(); pipe.close(); zmax=info[re.search(\"z_max:\\s*\",info).end(0):re.search(\"z_max:\\s*\\S*\\s*\",info).end(0)].strip(); if zmax != \"0\": print(jday+\" \"+zmax+\" \"+vdir+\" \"+image); #if zmax", "vdir=nti[nti.find(\"/\")+1:nti.rfind(\"/\")]; image=\"data_more/\"+nti[nti.rfind(\"/\")+1:nti.find(\"_cut\")]+\".grd\"; cmd=\"\\ngrdinfo \"+nti+\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; info=pipe.read().strip(); pipe.close(); zmax=info[re.search(\"z_max:\\s*\",info).end(0):re.search(\"z_max:\\s*\\S*\\s*\",info).end(0)].strip(); if zmax != \"0\": print(jday+\"", "for nti in ntis: jday=nti[nti.find(\".A\")+6:nti.find(\".A\")+9]; vdir=nti[nti.find(\"/\")+1:nti.rfind(\"/\")]; image=\"data_more/\"+nti[nti.rfind(\"/\")+1:nti.find(\"_cut\")]+\".grd\"; cmd=\"\\ngrdinfo \"+nti+\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; info=pipe.read().strip(); pipe.close(); zmax=info[re.search(\"z_max:\\s*\",info).end(0):re.search(\"z_max:\\s*\\S*\\s*\",info).end(0)].strip();", "zmax != \"0\": print(jday+\" \"+zmax+\" \"+vdir+\" \"+image); #if zmax != \"0\" and float(zmax)", "\"+image); #if zmax != \"0\" and float(zmax) > -0.861: #print(nti+\" \"+zmax); exit(); \"\"\"", ". -name \\\"*nti21_cut.grd\\\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; ntis=pipe.read().split(); pipe.close(); for nti in ntis: jday=nti[nti.find(\".A\")+6:nti.find(\".A\")+9]; vdir=nti[nti.find(\"/\")+1:nti.rfind(\"/\")]; image=\"data_more/\"+nti[nti.rfind(\"/\")+1:nti.find(\"_cut\")]+\".grd\";", "ntis=pipe.read().split(); pipe.close(); for nti in ntis: jday=nti[nti.find(\".A\")+6:nti.find(\".A\")+9]; vdir=nti[nti.find(\"/\")+1:nti.rfind(\"/\")]; image=\"data_more/\"+nti[nti.rfind(\"/\")+1:nti.find(\"_cut\")]+\".grd\"; cmd=\"\\ngrdinfo \"+nti+\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; info=pipe.read().strip();", "import subprocess; cmd=\"\\nfind . 
-name \\\"*nti21_cut.grd\\\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; ntis=pipe.read().split(); pipe.close(); for nti in ntis:", "jday=nti[nti.find(\".A\")+6:nti.find(\".A\")+9]; vdir=nti[nti.find(\"/\")+1:nti.rfind(\"/\")]; image=\"data_more/\"+nti[nti.rfind(\"/\")+1:nti.find(\"_cut\")]+\".grd\"; cmd=\"\\ngrdinfo \"+nti+\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; info=pipe.read().strip(); pipe.close(); zmax=info[re.search(\"z_max:\\s*\",info).end(0):re.search(\"z_max:\\s*\\S*\\s*\",info).end(0)].strip(); if zmax != \"0\":", "print(jday+\" \"+zmax+\" \"+vdir+\" \"+image); #if zmax != \"0\" and float(zmax) > -0.861: #print(nti+\"", "zmax=info[re.search(\"z_max:\\s*\",info).end(0):re.search(\"z_max:\\s*\\S*\\s*\",info).end(0)].strip(); if zmax != \"0\": print(jday+\" \"+zmax+\" \"+vdir+\" \"+image); #if zmax != \"0\"", "nti in ntis: jday=nti[nti.find(\".A\")+6:nti.find(\".A\")+9]; vdir=nti[nti.find(\"/\")+1:nti.rfind(\"/\")]; image=\"data_more/\"+nti[nti.rfind(\"/\")+1:nti.find(\"_cut\")]+\".grd\"; cmd=\"\\ngrdinfo \"+nti+\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; info=pipe.read().strip(); pipe.close(); zmax=info[re.search(\"z_max:\\s*\",info).end(0):re.search(\"z_max:\\s*\\S*\\s*\",info).end(0)].strip(); if", "\"+vdir+\" \"+image); #if zmax != \"0\" and float(zmax) > -0.861: #print(nti+\" \"+zmax); exit();", "pipe.close(); for nti in ntis: jday=nti[nti.find(\".A\")+6:nti.find(\".A\")+9]; vdir=nti[nti.find(\"/\")+1:nti.rfind(\"/\")]; image=\"data_more/\"+nti[nti.rfind(\"/\")+1:nti.find(\"_cut\")]+\".grd\"; cmd=\"\\ngrdinfo \"+nti+\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; info=pipe.read().strip(); pipe.close();", "re; import subprocess; cmd=\"\\nfind . -name \\\"*nti21_cut.grd\\\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; ntis=pipe.read().split(); pipe.close(); for nti in", "pipe.close(); zmax=info[re.search(\"z_max:\\s*\",info).end(0):re.search(\"z_max:\\s*\\S*\\s*\",info).end(0)].strip(); if zmax != \"0\": print(jday+\" \"+zmax+\" \"+vdir+\" \"+image); #if zmax !=", "import re; import subprocess; cmd=\"\\nfind . -name \\\"*nti21_cut.grd\\\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; ntis=pipe.read().split(); pipe.close(); for nti", "pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; info=pipe.read().strip(); pipe.close(); zmax=info[re.search(\"z_max:\\s*\",info).end(0):re.search(\"z_max:\\s*\\S*\\s*\",info).end(0)].strip(); if zmax != \"0\": print(jday+\" \"+zmax+\" \"+vdir+\" \"+image); #if", "cmd=\"\\ngrdinfo \"+nti+\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; info=pipe.read().strip(); pipe.close(); zmax=info[re.search(\"z_max:\\s*\",info).end(0):re.search(\"z_max:\\s*\\S*\\s*\",info).end(0)].strip(); if zmax != \"0\": print(jday+\" \"+zmax+\" \"+vdir+\"", "subprocess; cmd=\"\\nfind . -name \\\"*nti21_cut.grd\\\"\\n\"; pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout; ntis=pipe.read().split(); pipe.close(); for nti in ntis: jday=nti[nti.find(\".A\")+6:nti.find(\".A\")+9];" ]
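The loop above shells out through a shell string twice per file and slices the grdinfo report by regex match offsets, and it only runs on Python 2 (pipe.read() returns bytes on Python 3). Here is a minimal Python 3 sketch of the same flow, assuming GMT's grdinfo is on PATH and prints a z_max: field as above; the day-of-year slicing relies on the MODIS-style .AYYYYDDD token being present in the file name:

#!/usr/bin/env python3
import re
import subprocess
from pathlib import Path

for nti in sorted(Path(".").rglob("*nti21_cut.grd")):
    # One grdinfo call per grid, without going through a shell.
    info = subprocess.run(["grdinfo", str(nti)],
                          capture_output=True, text=True, check=True).stdout
    m = re.search(r"z_max:\s*(\S+)", info)
    if m and m.group(1) != "0":
        a = nti.name.find(".A")
        jday = nti.name[a + 6:a + 9]  # .AYYYYDDD -> day of year
        image = "data_more/" + nti.name.split("_cut")[0] + ".grd"
        print(jday, m.group(1), nti.parent, image)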
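The attendance script reconstructed below keeps its users in a SQLite file and reads two tables. As an aid to following those queries, here is a hedged schema sketch that would satisfy them; the layout is inferred from the SELECT/INSERT statements, not taken from the original users.db, and the column names beyond the four the code unpacks are guesses:

import sqlite3

# Hypothetical schema inferred from the queries in the listing below.
# SELECT * FROM users is unpacked as (id, username, fname, lname, ...),
# and users_present is queried by userid plus a YYYY-MM-DD date column
# that the INSERT never supplies, so it must default to today's date.
conn = sqlite3.connect('users.db')
conn.executescript("""
CREATE TABLE IF NOT EXISTS users (
    id       INTEGER PRIMARY KEY,
    username TEXT,
    fname    TEXT,
    lname    TEXT,
    personid TEXT
);
CREATE TABLE IF NOT EXISTS users_present (
    id     INTEGER PRIMARY KEY,
    userid INTEGER REFERENCES users(id),
    date   TEXT DEFAULT (date('now', 'localtime'))
);
""")
conn.commit()
conn.close()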
[ "= QLabel('') self.label1.setAlignment(Qt.AlignCenter) self.label2.setAlignment(Qt.AlignCenter) self.label1.setFont(font3) self.label2.setFont(font3) self.fbox = QFormLayout() self.fbox.setAlignment(Qt.AlignCenter) self.fbox.setContentsMargins(20, 20, 20,", "res2==None: format_str = \"INSERT INTO users_present (id, userid) VALUES (NULL,%d)\" %(int(userid)) #print format_str", "res[3] print \"Welcome %s !\" % (fname+' '+lname) detected_persons_cnt+=1 detected_persons.append(fname) persons.append(fname) now =", "datetime import string import random import shutil from time import gmtime, strftime, sleep", "= QHBoxLayout() self.qt.Dynamicframe = DynamicFrame(self.qt) self.qt.hbox4.addWidget(self.qt.Dynamicframe) self.qt.vbox = QVBoxLayout() self.qt.vbox.addLayout(self.qt.hbox4) self.qt.setLayout(self.qt.vbox) if __name__", "os.unlink(os.path.join(root, f)) for d in dirs: shutil.rmtree(os.path.join(root, d)) a = QApplication(sys.argv) w =", "= os.path.dirname(os.path.realpath(__file__)) dataset_path = os.path.join(base_path,'dataset') unknown_user_path=os.path.join(base_path,'unknowns') tmp_path = os.path.join(base_path,'tmp') placeholder_image = os.path.join(base_path,'placeholder_600x400.svg') db_path", "global current_userfname detected_personid = '' welcome_names='' ramp_frames = 10 print \"Face identification started", "frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frame = cv2.flip(frame, 1) image = QImage(frame, frame.shape[1], frame.shape[0],", "self.vbox2=QVBoxLayout() self.vbox2.addWidget(self.face_image) self.vbox2.addWidget(self.face_image_label) self.hbox=QHBoxLayout() self.hbox.addLayout(self.vbox1) self.hbox.addLayout(self.vbox2) self.hbox.setAlignment(Qt.AlignCenter) self.hbox.setSpacing(20) self.hbox2=QHBoxLayout() self.hbox2.setAlignment(Qt.AlignCenter) self.message_label=QLabel('message') self.message_label.setAlignment(Qt.AlignCenter) self.message_label.setFont(font2)", "QWidget() self.qt.showFullScreen() self.qt.pal=QPalette() self.qt.pal.setColor(QPalette.Background,QColor(0,0,0)) self.qt.pal.setColor(QPalette.Foreground,QColor(255,255,255)) self.qt.setPalette(self.qt.pal) self.bg_color=0 self.qt.hbox4 = QHBoxLayout() self.qt.Dynamicframe = DynamicFrame(self.qt)", "cascPath = 'haarcascade_frontalface_default.xml' faceCascade = cv2.CascadeClassifier(cascPath) # function to generate a random id", "+ str(detected_personid) comm = \"SELECT * FROM %s WHERE personid = '%s'\" %", "(x, y), (x + w, y + h), (0, 255, 0), 2) frame", "minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) print \"Total Faces in Image = %d \" %", "range(detected_persons_cnt-1): welcome_names = welcome_names + detected_persons[i] + ', ' welcome_names=welcome_names[:-2] welcome_names=welcome_names + '", "len(faces) #self.message_label.setText(\"Total Faces in Image = %d \" % len(faces)) if len(faces) >", "self.fbox.addRow(self.label2) self.vbox = QVBoxLayout() self.vbox.addLayout(self.fbox) self.vbox.addLayout(self.hbox) self.vbox.addLayout(self.hbox2) self.vbox.setAlignment(Qt.AlignCenter) self.setLayout(self.vbox) self.update_check() def stop_capture(self): if", "else: cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255),", "self.capturing = False self.capture.release() self.timer.stop() cv2.destroyAllWindows() def update_check(self): self.video_timer = QTimer(self) self.video_timer.timeout.connect(self.display_video_stream) self.video_timer.start(capture_interval)", "small_text_size) font2 = QFont('Helvetica', medium_text_size) font3 = QFont('Helvetica', large_text_size) conn = 
sqlite3.connect(db_path) cursor", "faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) for (x, y, w, h)", "= \"SELECT * FROM %s WHERE personid = '%s'\" % (TABLE_NAME,detected_personid) res =", "detected_persons_cnt > 1: for i in range(detected_persons_cnt-1): welcome_names = welcome_names + detected_persons[i] +", "+ h), (0, 255, 0), 2) frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frame = cv2.flip(frame,", "minNeighbors=5, minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) print \"Total Faces in Image = %d \"", "imageUpload.py for uploading captured images to cloudinary import imageUpload as imup # import", "import * from PyQt4.QtGui import * from PyQt4.QtWebKit import * import datetime import", "VALUES (NULL,%d)\" %(int(userid)) #print format_str conn.execute(format_str) conn.commit() print \"Attendance marked for user %s", "db_path = os.path.join(base_path,'users.db') cloudinary_dataset = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/dataset' cloudinary_tmp = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/tmp' current_userid = 0 current_userfname", "% (fname+' '+lname) detected_persons_cnt+=1 detected_persons.append(fname) persons.append(fname) now = datetime.datetime.now() comm = \"SELECT *", "if self.counter==int(detection_interval/capture_interval): self.message_label.setText('Face identification started ...') self.update_dynamic_frame() self.counter=0 else: self.counter=self.counter+1 def update_dynamic_frame(self): global", "capture_interval=30 camera_port = 0 font1 = QFont('Helvetica', small_text_size) font2 = QFont('Helvetica', medium_text_size) font3", "from PyQt4.QtGui import * from PyQt4.QtWebKit import * import datetime import string import", "self.hbox.addLayout(self.vbox2) self.hbox.setAlignment(Qt.AlignCenter) self.hbox.setSpacing(20) self.hbox2=QHBoxLayout() self.hbox2.setAlignment(Qt.AlignCenter) self.message_label=QLabel('message') self.message_label.setAlignment(Qt.AlignCenter) self.message_label.setFont(font2) self.hbox2.addWidget(self.message_label) self.hbox2.setContentsMargins(20, 20, 20, 20)", "\"Unknown person found\" cv2.imwrite(os.path.join(unknown_user_path,'cam1_'+time_str+'.jpg'),image_crop) persons.append('Unknown') if detected_persons_cnt > 1: for i in range(detected_persons_cnt-1):", "msface api calls import MSFaceAPI as msface large_text_size = 22 medium_text_size = 14", "QTimer(self) self.video_timer.timeout.connect(self.display_video_stream) self.video_timer.start(capture_interval) def display_video_stream(self): ret,frame = self.capture.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces", "detected_personid = '' welcome_names='' ramp_frames = 10 print \"Face identification started ..........\" cv2.destroyAllWindows()", "\" + str(file) print \"faceId = \" + str(faceid) detected_personid = msface.face_identify(faceid) if", "comm res2=cursor.execute(comm) res2=cursor.fetchone() if res2==None: format_str = \"INSERT INTO users_present (id, userid) VALUES", "#image=cv2.flip(frame, 1) image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) face_image = QImage(image, image.shape[1], image.shape[0], image.strides[0], QImage.Format_RGB888)", "Captured') self.capture_cnt+=1 gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(80,", "Faces in Image = %d \" % len(faces)) if len(faces) > 0: detected_persons", "and date = '%s' \" % (int(userid), now.strftime(\"%Y-%m-%d\")) #print 
comm res2=cursor.execute(comm) res2=cursor.fetchone() if", "global current_userid global current_userfname detected_personid = '' welcome_names='' ramp_frames = 10 print \"Face", "calls import MSFaceAPI as msface large_text_size = 22 medium_text_size = 14 small_text_size =", "welcome_names + detected_persons[i] + ', ' welcome_names=welcome_names[:-2] welcome_names=welcome_names + ' & ' +", "''.join(random.choice(chars) for _ in range(size)) def make_dir(path): try: os.makedirs(path) except OSError: if not", "as e: print \"Errors occured !\" print e class FullscreenWindow: def __init__(self, parent,", "print \"detected_personid = \" + str(detected_personid) comm = \"SELECT * FROM %s WHERE", "%s \" % uname else print \"Attendance already marked for user %s \"", "= [] persons_cnt=0 detected_persons_cnt=0 for (x, y, w, h) in faces: if w*h>500:", "= sqlite3.connect(db_path) cursor = conn.cursor() TABLE_NAME=\"users\" cascPath = 'haarcascade_frontalface_default.xml' faceCascade = cv2.CascadeClassifier(cascPath) #", "w = FullscreenWindow(a) sys.exit(a.exec_()) # command to terminate the running program # killall", "%d \" % persons_cnt) file_name = id_generator()+'.jpg' file = os.path.join(tmp_path,file_name) cloudinary_url=cloudinary_tmp + '/'", "k=0 for (x, y, w, h) in faces: if persons[k]!='Unknown': cv2.rectangle(frame, (x, y),", "QHBoxLayout() self.qt.Dynamicframe = DynamicFrame(self.qt) self.qt.hbox4.addWidget(self.qt.Dynamicframe) self.qt.vbox = QVBoxLayout() self.qt.vbox.addLayout(self.qt.hbox4) self.qt.setLayout(self.qt.vbox) if __name__ ==", "current_userfname detected_personid = '' welcome_names='' ramp_frames = 10 print \"Face identification started ..........\"", "comm = \"SELECT * FROM users_present WHERE userid = %d and date =", "\"Welcome %s !\" % (fname+' '+lname) detected_persons_cnt+=1 detected_persons.append(fname) persons.append(fname) now = datetime.datetime.now() comm", "self.fbox.setContentsMargins(20, 20, 20, 20) self.fbox.addRow(self.label1) self.fbox.addRow(self.label2) self.vbox = QVBoxLayout() self.vbox.addLayout(self.fbox) self.vbox.addLayout(self.hbox) self.vbox.addLayout(self.hbox2) self.vbox.setAlignment(Qt.AlignCenter)", "self.face_image.setFixedSize(600,450) self.face_image.setAlignment(Qt.AlignRight) self.face_image.setPixmap(QPixmap(placeholder_image)) self.face_image_label=QLabel('Last Capture Results') self.face_image_label.setAlignment(Qt.AlignCenter) self.face_image_label.setFont(font2) self.vbox1=QVBoxLayout() self.vbox1.addWidget(self.video_stream) self.vbox1.addWidget(self.video_stream_label) self.vbox2=QVBoxLayout() self.vbox2.addWidget(self.face_image)", "base_path = os.path.dirname(os.path.realpath(__file__)) dataset_path = os.path.join(base_path,'dataset') unknown_user_path=os.path.join(base_path,'unknowns') tmp_path = os.path.join(base_path,'tmp') placeholder_image = os.path.join(base_path,'placeholder_600x400.svg')", "self.vbox1.addWidget(self.video_stream) self.vbox1.addWidget(self.video_stream_label) self.vbox2=QVBoxLayout() self.vbox2.addWidget(self.face_image) self.vbox2.addWidget(self.face_image_label) self.hbox=QHBoxLayout() self.hbox.addLayout(self.vbox1) self.hbox.addLayout(self.vbox2) self.hbox.setAlignment(Qt.AlignCenter) self.hbox.setSpacing(20) self.hbox2=QHBoxLayout() self.hbox2.setAlignment(Qt.AlignCenter) self.message_label=QLabel('message')", "unknown_user_path=os.path.join(base_path,'unknowns') tmp_path = os.path.join(base_path,'tmp') placeholder_image = os.path.join(base_path,'placeholder_600x400.svg') db_path = 
os.path.join(base_path,'users.db') cloudinary_dataset = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/dataset'", "current_userfname = '' detection_interval=10000 capture_interval=30 camera_port = 0 font1 = QFont('Helvetica', small_text_size) font2", "msface.face_identify(faceid) if detected_personid: print \"detected_personid = \" + str(detected_personid) comm = \"SELECT *", "cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) print", "from PyQt4.QtCore import * from PyQt4.QtGui import * from PyQt4.QtWebKit import * import", "import random import shutil from time import gmtime, strftime, sleep import sqlite3 #", "DynamicFrame(self.qt) self.qt.hbox4.addWidget(self.qt.Dynamicframe) self.qt.vbox = QVBoxLayout() self.qt.vbox.addLayout(self.qt.hbox4) self.qt.setLayout(self.qt.vbox) if __name__ == '__main__': make_dir(tmp_path) make_dir(unknown_user_path)", "self.video_stream = QLabel() self.video_stream.setScaledContents(True) self.video_stream.setAlignment(Qt.AlignLeft) self.video_stream.setFixedSize(600,450) self.video_stream_label=QLabel('Live Video Stream') self.video_stream_label.setAlignment(Qt.AlignCenter) self.video_stream_label.setFont(font2) self.face_image =", "d)) a = QApplication(sys.argv) w = FullscreenWindow(a) sys.exit(a.exec_()) # command to terminate the", "file_name cv2.imwrite(file, image_crop) imup.upload_image(file,file_name) faceid=msface.face_detect(cloudinary_url) print \"Result for person %d \" % persons_cnt", "\"SELECT * FROM %s WHERE personid = '%s'\" % (TABLE_NAME,detected_personid) res = cursor.execute(comm)", "if w*h>500: persons_cnt+=1 image_crop = frame[y:y+h,x:x+w] #self.message_label.setText(\"Processing.. 
%d \" % persons_cnt) file_name =", "image\" k=0 for (x, y, w, h) in faces: if persons[k]!='Unknown': cv2.rectangle(frame, (x,", "shutil from time import gmtime, strftime, sleep import sqlite3 # import imageUpload.py for", "%d and date = '%s' \" % (int(userid), now.strftime(\"%Y-%m-%d\")) #print comm res2=cursor.execute(comm) res2=cursor.fetchone()", "% int((detection_interval-self.counter*capture_interval)/1000)) if self.counter==int(detection_interval/capture_interval): self.message_label.setText('Face identification started ...') self.update_dynamic_frame() self.counter=0 else: self.counter=self.counter+1 def", "in Image = %d \" % len(faces) #self.message_label.setText(\"Total Faces in Image = %d", "__name__ == '__main__': make_dir(tmp_path) make_dir(unknown_user_path) for root, dirs, files in os.walk(tmp_path): for f", "if self.capturing: self.capturing = False self.capture.release() self.timer.stop() cv2.destroyAllWindows() def update_check(self): self.video_timer = QTimer(self)", "self.message_label.setText('Face identification started ...') self.update_dynamic_frame() self.counter=0 else: self.counter=self.counter+1 def update_dynamic_frame(self): global current_userid global", "ret,frame = self.capture.read() #self.message_label.setText('Image Captured') self.capture_cnt+=1 gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale(", "in Image = %d \" % len(faces)) if len(faces) > 0: detected_persons =", "import shutil from time import gmtime, strftime, sleep import sqlite3 # import imageUpload.py", "not os.path.isdir(path): raise class DynamicFrame(QWidget): def __init__(self, parent, *args, **kwargs): super(DynamicFrame, self).__init__() self.initUI()", "cv2 from PyQt4.QtCore import * from PyQt4.QtGui import * from PyQt4.QtWebKit import *", "= QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888) self.video_stream.setPixmap(QPixmap.fromImage(image)) self.message_label.setText('Next image capture in %d seconds'", "h), (0, 255, 0), 2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,255,0),1) else: cv2.rectangle(frame,", "self.vbox.setAlignment(Qt.AlignCenter) self.setLayout(self.vbox) self.update_check() def stop_capture(self): if self.capturing: self.capturing = False self.capture.release() self.timer.stop() cv2.destroyAllWindows()", "placeholder_image = os.path.join(base_path,'placeholder_600x400.svg') db_path = os.path.join(base_path,'users.db') cloudinary_dataset = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/dataset' cloudinary_tmp = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/tmp' current_userid", "= 10 base_path = os.path.dirname(os.path.realpath(__file__)) dataset_path = os.path.join(base_path,'dataset') unknown_user_path=os.path.join(base_path,'unknowns') tmp_path = os.path.join(base_path,'tmp') placeholder_image", "**kwargs): super(DynamicFrame, self).__init__() self.initUI() self.counter=0 self.capture_cnt=0 self.capture = cv2.VideoCapture(camera_port) def initUI(self): self.video_stream =", "font2 = QFont('Helvetica', medium_text_size) font3 = QFont('Helvetica', large_text_size) conn = sqlite3.connect(db_path) cursor =", "cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)", "def stop_capture(self): if self.capturing: self.capturing = False self.capture.release() self.timer.stop() cv2.destroyAllWindows() def update_check(self): self.video_timer", "uname else: time_str=strftime(\"%Y-%m-%d_%H:%M:%S\", gmtime()) print \"Unknown person 
found\" cv2.imwrite(os.path.join(unknown_user_path,'cam1_'+time_str+'.jpg'),image_crop) persons.append('Unknown') if detected_persons_cnt >", "api calls import MSFaceAPI as msface large_text_size = 22 medium_text_size = 14 small_text_size", "faces: if persons[k]!='Unknown': cv2.rectangle(frame, (x, y), (x + w, y + h), (0,", "[] persons = [] persons_cnt=0 detected_persons_cnt=0 for (x, y, w, h) in faces:", "'+ welcome_names) else: self.label2.setText('') print \"No person in image\" k=0 for (x, y,", "+ string.ascii_uppercase): return ''.join(random.choice(chars) for _ in range(size)) def make_dir(path): try: os.makedirs(path) except", "self.hbox2.setSpacing(10) self.label1 = QLabel('Real-Time Face Recognition based Surveillance') self.label2 = QLabel('') self.label1.setAlignment(Qt.AlignCenter) self.label2.setAlignment(Qt.AlignCenter)", "QImage.Format_RGB888) self.video_stream.setPixmap(QPixmap.fromImage(image)) self.message_label.setText('Next image capture in %d seconds' % int((detection_interval-self.counter*capture_interval)/1000)) if self.counter==int(detection_interval/capture_interval): self.message_label.setText('Face", "person %d \" % persons_cnt print \"Image File = \" + str(file) print", "f)) for d in dirs: shutil.rmtree(os.path.join(root, d)) a = QApplication(sys.argv) w = FullscreenWindow(a)", "255, 0), 2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,255,0),1) else: cv2.rectangle(frame, (x, y),", "string.ascii_uppercase): return ''.join(random.choice(chars) for _ in range(size)) def make_dir(path): try: os.makedirs(path) except OSError:", "#print comm res2=cursor.execute(comm) res2=cursor.fetchone() if res2==None: format_str = \"INSERT INTO users_present (id, userid)", "0: detected_persons = [] persons = [] persons_cnt=0 detected_persons_cnt=0 for (x, y, w,", "\"SELECT * FROM users_present WHERE userid = %d and date = '%s' \"", "person in image\" k=0 for (x, y, w, h) in faces: if persons[k]!='Unknown':", "self.vbox = QVBoxLayout() self.vbox.addLayout(self.fbox) self.vbox.addLayout(self.hbox) self.vbox.addLayout(self.hbox2) self.vbox.setAlignment(Qt.AlignCenter) self.setLayout(self.vbox) self.update_check() def stop_capture(self): if self.capturing:", "%s WHERE personid = '%s'\" % (TABLE_NAME,detected_personid) res = cursor.execute(comm) res = cursor.fetchone()", "print e class FullscreenWindow: def __init__(self, parent, *args, **kwargs): self.qt = QWidget() self.qt.showFullScreen()", "y + h), (0, 255, 0), 2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,255,0),1)", "QApplication(sys.argv) w = FullscreenWindow(a) sys.exit(a.exec_()) # command to terminate the running program #", "OSError: if not os.path.isdir(path): raise class DynamicFrame(QWidget): def __init__(self, parent, *args, **kwargs): super(DynamicFrame,", "persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,255,0),1) else: cv2.rectangle(frame, (x, y), (x + w, y", "= msface.face_identify(faceid) if detected_personid: print \"detected_personid = \" + str(detected_personid) comm = \"SELECT", "= QFont('Helvetica', medium_text_size) font3 = QFont('Helvetica', large_text_size) conn = sqlite3.connect(db_path) cursor = conn.cursor()", "d in dirs: shutil.rmtree(os.path.join(root, d)) a = QApplication(sys.argv) w = FullscreenWindow(a) sys.exit(a.exec_()) #", "in range(detected_persons_cnt-1): welcome_names = welcome_names + detected_persons[i] + ', ' welcome_names=welcome_names[:-2] welcome_names=welcome_names +", ", 1,(0,255,0),1) else: 
cv2.rectangle(frame, (x, y), (x + w, y + h), (0,", "% len(faces)) if len(faces) > 0: detected_persons = [] persons = [] persons_cnt=0", "= id_generator()+'.jpg' file = os.path.join(tmp_path,file_name) cloudinary_url=cloudinary_tmp + '/' + file_name cv2.imwrite(file, image_crop) imup.upload_image(file,file_name)", "= QFont('Helvetica', small_text_size) font2 = QFont('Helvetica', medium_text_size) font3 = QFont('Helvetica', large_text_size) conn =", "self.label2.setAlignment(Qt.AlignCenter) self.label1.setFont(font3) self.label2.setFont(font3) self.fbox = QFormLayout() self.fbox.setAlignment(Qt.AlignCenter) self.fbox.setContentsMargins(20, 20, 20, 20) self.fbox.addRow(self.label1) self.fbox.addRow(self.label2)", "detected_persons_cnt=0 for (x, y, w, h) in faces: if w*h>500: persons_cnt+=1 image_crop =", "def __init__(self, parent, *args, **kwargs): super(DynamicFrame, self).__init__() self.initUI() self.counter=0 self.capture_cnt=0 self.capture = cv2.VideoCapture(camera_port)", "large_text_size = 22 medium_text_size = 14 small_text_size = 10 base_path = os.path.dirname(os.path.realpath(__file__)) dataset_path", "self.capture = cv2.VideoCapture(camera_port) def initUI(self): self.video_stream = QLabel() self.video_stream.setScaledContents(True) self.video_stream.setAlignment(Qt.AlignLeft) self.video_stream.setFixedSize(600,450) self.video_stream_label=QLabel('Live Video", "faces: cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),", "Image = %d \" % len(faces) #self.message_label.setText(\"Total Faces in Image = %d \"", "= faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) print \"Total Faces in", "ret,frame = self.capture.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5,", "self.capture_cnt+=1 gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(80, 80),", "conn = sqlite3.connect(db_path) cursor = conn.cursor() TABLE_NAME=\"users\" cascPath = 'haarcascade_frontalface_default.xml' faceCascade = cv2.CascadeClassifier(cascPath)", "gray, scaleFactor=1.1, minNeighbors=5, minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) print \"Total Faces in Image =", "+ w, y + h), (0, 0, 255), 2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL", "cv2.COLOR_BGR2RGB) face_image = QImage(image, image.shape[1], image.shape[0], image.strides[0], QImage.Format_RGB888) self.face_image.setPixmap(QPixmap.fromImage(face_image)) except Exception as e:", "in range(size)) def make_dir(path): try: os.makedirs(path) except OSError: if not os.path.isdir(path): raise class", "QImage.Format_RGB888) self.face_image.setPixmap(QPixmap.fromImage(face_image)) except Exception as e: print \"Errors occured !\" print e class", "FullscreenWindow(a) sys.exit(a.exec_()) # command to terminate the running program # killall -9 python", "persons = [] persons_cnt=0 detected_persons_cnt=0 for (x, y, w, h) in faces: if", "2) frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frame = cv2.flip(frame, 1) image = QImage(frame, frame.shape[1],", "WHERE userid = %d and date = '%s' \" % (int(userid), now.strftime(\"%Y-%m-%d\")) #print", "detected_persons[0] self.label2.setText('Hello '+ welcome_names) else: self.label2.setText('') print \"No person in image\" k=0 for", "PyQt4.QtWebKit import * import datetime import string import random import shutil from time", "self.vbox1=QVBoxLayout() 
self.vbox1.addWidget(self.video_stream) self.vbox1.addWidget(self.video_stream_label) self.vbox2=QVBoxLayout() self.vbox2.addWidget(self.face_image) self.vbox2.addWidget(self.face_image_label) self.hbox=QHBoxLayout() self.hbox.addLayout(self.vbox1) self.hbox.addLayout(self.vbox2) self.hbox.setAlignment(Qt.AlignCenter) self.hbox.setSpacing(20) self.hbox2=QHBoxLayout() self.hbox2.setAlignment(Qt.AlignCenter)", "detected_persons_cnt>0: welcome_names = detected_persons[0] self.label2.setText('Hello '+ welcome_names) else: self.label2.setText('') print \"No person in", ", 1,(0,0,255),1) k=k+1 #image=cv2.flip(frame, 1) image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) face_image = QImage(image, image.shape[1],", "%d \" % len(faces) #self.message_label.setText(\"Total Faces in Image = %d \" % len(faces))", "= QLabel() self.face_image.setScaledContents(True) self.face_image.setFixedSize(600,450) self.face_image.setAlignment(Qt.AlignRight) self.face_image.setPixmap(QPixmap(placeholder_image)) self.face_image_label=QLabel('Last Capture Results') self.face_image_label.setAlignment(Qt.AlignCenter) self.face_image_label.setFont(font2) self.vbox1=QVBoxLayout() self.vbox1.addWidget(self.video_stream)", "if persons[k]!='Unknown': cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255,", "small_text_size = 10 base_path = os.path.dirname(os.path.realpath(__file__)) dataset_path = os.path.join(base_path,'dataset') unknown_user_path=os.path.join(base_path,'unknowns') tmp_path = os.path.join(base_path,'tmp')", "xrange(ramp_frames): s, im = self.capture.read() ret,frame = self.capture.read() #self.message_label.setText('Image Captured') self.capture_cnt+=1 gray =", "+ w, y + h), (0, 255, 0), 2) frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)", "'' detection_interval=10000 capture_interval=30 camera_port = 0 font1 = QFont('Helvetica', small_text_size) font2 = QFont('Helvetica',", "display_video_stream(self): ret,frame = self.capture.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray, scaleFactor=1.1,", "= res[0] uname = res[1] fname = res[2] lname = res[3] print \"Welcome", "try: for i in xrange(ramp_frames): s, im = self.capture.read() ret,frame = self.capture.read() #self.message_label.setText('Image", "20, 20) self.fbox.addRow(self.label1) self.fbox.addRow(self.label2) self.vbox = QVBoxLayout() self.vbox.addLayout(self.fbox) self.vbox.addLayout(self.hbox) self.vbox.addLayout(self.hbox2) self.vbox.setAlignment(Qt.AlignCenter) self.setLayout(self.vbox) self.update_check()", "255), 2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,0,255),1) k=k+1 #image=cv2.flip(frame, 1) image =", "lname = res[3] print \"Welcome %s !\" % (fname+' '+lname) detected_persons_cnt+=1 detected_persons.append(fname) persons.append(fname)", "string import random import shutil from time import gmtime, strftime, sleep import sqlite3", "def initUI(self): self.video_stream = QLabel() self.video_stream.setScaledContents(True) self.video_stream.setAlignment(Qt.AlignLeft) self.video_stream.setFixedSize(600,450) self.video_stream_label=QLabel('Live Video Stream') self.video_stream_label.setAlignment(Qt.AlignCenter) self.video_stream_label.setFont(font2)", "'%s' \" % (int(userid), now.strftime(\"%Y-%m-%d\")) #print comm res2=cursor.execute(comm) res2=cursor.fetchone() if res2==None: format_str =", "self.counter=self.counter+1 def update_dynamic_frame(self): global current_userid global current_userfname detected_personid = '' welcome_names='' ramp_frames =", "Surveillance') 
self.label2 = QLabel('') self.label1.setAlignment(Qt.AlignCenter) self.label2.setAlignment(Qt.AlignCenter) self.label1.setFont(font3) self.label2.setFont(font3) self.fbox = QFormLayout() self.fbox.setAlignment(Qt.AlignCenter) self.fbox.setContentsMargins(20,", "import string import random import shutil from time import gmtime, strftime, sleep import", "res2=cursor.execute(comm) res2=cursor.fetchone() if res2==None: format_str = \"INSERT INTO users_present (id, userid) VALUES (NULL,%d)\"", "= cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frame = cv2.flip(frame, 1) image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0],", "# import imageUpload.py for uploading captured images to cloudinary import imageUpload as imup", "import gmtime, strftime, sleep import sqlite3 # import imageUpload.py for uploading captured images", "MSFaceAPI as msface large_text_size = 22 medium_text_size = 14 small_text_size = 10 base_path", "self.vbox.addLayout(self.fbox) self.vbox.addLayout(self.hbox) self.vbox.addLayout(self.hbox2) self.vbox.setAlignment(Qt.AlignCenter) self.setLayout(self.vbox) self.update_check() def stop_capture(self): if self.capturing: self.capturing = False", "print \"No person in image\" k=0 for (x, y, w, h) in faces:", "self.hbox2.setContentsMargins(20, 20, 20, 20) self.hbox2.setSpacing(10) self.label1 = QLabel('Real-Time Face Recognition based Surveillance') self.label2", "(x, y, w, h) in faces: if persons[k]!='Unknown': cv2.rectangle(frame, (x, y), (x +", "self.video_stream.setFixedSize(600,450) self.video_stream_label=QLabel('Live Video Stream') self.video_stream_label.setAlignment(Qt.AlignCenter) self.video_stream_label.setFont(font2) self.face_image = QLabel() self.face_image.setScaledContents(True) self.face_image.setFixedSize(600,450) self.face_image.setAlignment(Qt.AlignRight) self.face_image.setPixmap(QPixmap(placeholder_image))", "if len(faces) > 0: detected_persons = [] persons = [] persons_cnt=0 detected_persons_cnt=0 for", "import cv2 from PyQt4.QtCore import * from PyQt4.QtGui import * from PyQt4.QtWebKit import", "self.counter=0 self.capture_cnt=0 self.capture = cv2.VideoCapture(camera_port) def initUI(self): self.video_stream = QLabel() self.video_stream.setScaledContents(True) self.video_stream.setAlignment(Qt.AlignLeft) self.video_stream.setFixedSize(600,450)", "= %d \" % len(faces)) if len(faces) > 0: detected_persons = [] persons", "print \"faceId = \" + str(faceid) detected_personid = msface.face_identify(faceid) if detected_personid: print \"detected_personid", "WHERE personid = '%s'\" % (TABLE_NAME,detected_personid) res = cursor.execute(comm) res = cursor.fetchone() if", "import sqlite3 # import imageUpload.py for uploading captured images to cloudinary import imageUpload", "id_generator(size=20, chars=string.ascii_lowercase + string.digits + string.ascii_uppercase): return ''.join(random.choice(chars) for _ in range(size)) def", "y), (x + w, y + h), (0, 255, 0), 2) frame =", "0), 2) frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frame = cv2.flip(frame, 1) image = QImage(frame,", "update_dynamic_frame(self): global current_userid global current_userfname detected_personid = '' welcome_names='' ramp_frames = 10 print", "\"INSERT INTO users_present (id, userid) VALUES (NULL,%d)\" %(int(userid)) #print format_str conn.execute(format_str) conn.commit() print", "= QVBoxLayout() self.vbox.addLayout(self.fbox) self.vbox.addLayout(self.hbox) self.vbox.addLayout(self.hbox2) self.vbox.setAlignment(Qt.AlignCenter) self.setLayout(self.vbox) self.update_check() def 
stop_capture(self): if self.capturing: self.capturing", "strftime, sleep import sqlite3 # import imageUpload.py for uploading captured images to cloudinary", "i in xrange(ramp_frames): s, im = self.capture.read() ret,frame = self.capture.read() #self.message_label.setText('Image Captured') self.capture_cnt+=1", "self.qt.pal=QPalette() self.qt.pal.setColor(QPalette.Background,QColor(0,0,0)) self.qt.pal.setColor(QPalette.Foreground,QColor(255,255,255)) self.qt.setPalette(self.qt.pal) self.bg_color=0 self.qt.hbox4 = QHBoxLayout() self.qt.Dynamicframe = DynamicFrame(self.qt) self.qt.hbox4.addWidget(self.qt.Dynamicframe) self.qt.vbox", "self.label1.setAlignment(Qt.AlignCenter) self.label2.setAlignment(Qt.AlignCenter) self.label1.setFont(font3) self.label2.setFont(font3) self.fbox = QFormLayout() self.fbox.setAlignment(Qt.AlignCenter) self.fbox.setContentsMargins(20, 20, 20, 20) self.fbox.addRow(self.label1)", "make_dir(tmp_path) make_dir(unknown_user_path) for root, dirs, files in os.walk(tmp_path): for f in files: os.unlink(os.path.join(root,", "self.hbox2=QHBoxLayout() self.hbox2.setAlignment(Qt.AlignCenter) self.message_label=QLabel('message') self.message_label.setAlignment(Qt.AlignCenter) self.message_label.setFont(font2) self.hbox2.addWidget(self.message_label) self.hbox2.setContentsMargins(20, 20, 20, 20) self.hbox2.setSpacing(10) self.label1 =", "self.capturing: self.capturing = False self.capture.release() self.timer.stop() cv2.destroyAllWindows() def update_check(self): self.video_timer = QTimer(self) self.video_timer.timeout.connect(self.display_video_stream)", "QLabel('Real-Time Face Recognition based Surveillance') self.label2 = QLabel('') self.label1.setAlignment(Qt.AlignCenter) self.label2.setAlignment(Qt.AlignCenter) self.label1.setFont(font3) self.label2.setFont(font3) self.fbox", "\"Result for person %d \" % persons_cnt print \"Image File = \" +", "= cv2.VideoCapture(camera_port) def initUI(self): self.video_stream = QLabel() self.video_stream.setScaledContents(True) self.video_stream.setAlignment(Qt.AlignLeft) self.video_stream.setFixedSize(600,450) self.video_stream_label=QLabel('Live Video Stream')", "cv2.destroyAllWindows() try: for i in xrange(ramp_frames): s, im = self.capture.read() ret,frame = self.capture.read()", "% persons_cnt) file_name = id_generator()+'.jpg' file = os.path.join(tmp_path,file_name) cloudinary_url=cloudinary_tmp + '/' + file_name", "14 small_text_size = 10 base_path = os.path.dirname(os.path.realpath(__file__)) dataset_path = os.path.join(base_path,'dataset') unknown_user_path=os.path.join(base_path,'unknowns') tmp_path =", "image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888) self.video_stream.setPixmap(QPixmap.fromImage(image)) self.message_label.setText('Next image capture in %d", "%s !\" % (fname+' '+lname) detected_persons_cnt+=1 detected_persons.append(fname) persons.append(fname) now = datetime.datetime.now() comm =", "self.qt.pal.setColor(QPalette.Background,QColor(0,0,0)) self.qt.pal.setColor(QPalette.Foreground,QColor(255,255,255)) self.qt.setPalette(self.qt.pal) self.bg_color=0 self.qt.hbox4 = QHBoxLayout() self.qt.Dynamicframe = DynamicFrame(self.qt) self.qt.hbox4.addWidget(self.qt.Dynamicframe) self.qt.vbox =", "self.counter==int(detection_interval/capture_interval): self.message_label.setText('Face identification started ...') self.update_dynamic_frame() self.counter=0 else: self.counter=self.counter+1 def update_dynamic_frame(self): global current_userid", "initUI(self): self.video_stream = 
QLabel() self.video_stream.setScaledContents(True) self.video_stream.setAlignment(Qt.AlignLeft) self.video_stream.setFixedSize(600,450) self.video_stream_label=QLabel('Live Video Stream') self.video_stream_label.setAlignment(Qt.AlignCenter) self.video_stream_label.setFont(font2) self.face_image", "detected_persons.append(fname) persons.append(fname) now = datetime.datetime.now() comm = \"SELECT * FROM users_present WHERE userid", "def id_generator(size=20, chars=string.ascii_lowercase + string.digits + string.ascii_uppercase): return ''.join(random.choice(chars) for _ in range(size))", "e: print \"Errors occured !\" print e class FullscreenWindow: def __init__(self, parent, *args,", "+ ' & ' + detected_persons[detected_persons_cnt-1] elif detected_persons_cnt>0: welcome_names = detected_persons[0] self.label2.setText('Hello '+", "person found\" cv2.imwrite(os.path.join(unknown_user_path,'cam1_'+time_str+'.jpg'),image_crop) persons.append('Unknown') if detected_persons_cnt > 1: for i in range(detected_persons_cnt-1): welcome_names", "raise class DynamicFrame(QWidget): def __init__(self, parent, *args, **kwargs): super(DynamicFrame, self).__init__() self.initUI() self.counter=0 self.capture_cnt=0", "now.strftime(\"%Y-%m-%d\")) #print comm res2=cursor.execute(comm) res2=cursor.fetchone() if res2==None: format_str = \"INSERT INTO users_present (id,", "i in range(detected_persons_cnt-1): welcome_names = welcome_names + detected_persons[i] + ', ' welcome_names=welcome_names[:-2] welcome_names=welcome_names", "image.strides[0], QImage.Format_RGB888) self.face_image.setPixmap(QPixmap.fromImage(face_image)) except Exception as e: print \"Errors occured !\" print e", "def __init__(self, parent, *args, **kwargs): self.qt = QWidget() self.qt.showFullScreen() self.qt.pal=QPalette() self.qt.pal.setColor(QPalette.Background,QColor(0,0,0)) self.qt.pal.setColor(QPalette.Foreground,QColor(255,255,255)) self.qt.setPalette(self.qt.pal)", "cv2.COLOR_BGR2RGB) frame = cv2.flip(frame, 1) image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888) self.video_stream.setPixmap(QPixmap.fromImage(image))", "in dirs: shutil.rmtree(os.path.join(root, d)) a = QApplication(sys.argv) w = FullscreenWindow(a) sys.exit(a.exec_()) # command", "cloudinary import imageUpload as imup # import MSFaceAPI.py for msface api calls import", "20) self.hbox2.setSpacing(10) self.label1 = QLabel('Real-Time Face Recognition based Surveillance') self.label2 = QLabel('') self.label1.setAlignment(Qt.AlignCenter)", ") print \"Total Faces in Image = %d \" % len(faces) #self.message_label.setText(\"Total Faces", "\" % (int(userid), now.strftime(\"%Y-%m-%d\")) #print comm res2=cursor.execute(comm) res2=cursor.fetchone() if res2==None: format_str = \"INSERT", "= os.path.join(base_path,'dataset') unknown_user_path=os.path.join(base_path,'unknowns') tmp_path = os.path.join(base_path,'tmp') placeholder_image = os.path.join(base_path,'placeholder_600x400.svg') db_path = os.path.join(base_path,'users.db') cloudinary_dataset", "welcome_names = welcome_names + detected_persons[i] + ', ' welcome_names=welcome_names[:-2] welcome_names=welcome_names + ' &", "w, y + h), (0, 255, 0), 2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL ,", "current_userid = 0 current_userfname = '' detection_interval=10000 capture_interval=30 camera_port = 0 font1 =", "persons_cnt) file_name = id_generator()+'.jpg' file = os.path.join(tmp_path,file_name) cloudinary_url=cloudinary_tmp + '/' + file_name cv2.imwrite(file,", 
"= 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/dataset' cloudinary_tmp = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/tmp' current_userid = 0 current_userfname = '' detection_interval=10000 capture_interval=30", "generate a random id for image file name def id_generator(size=20, chars=string.ascii_lowercase + string.digits", "utf-8 -*- import sys import os import cv2 from PyQt4.QtCore import * from", "marked for user %s \" % uname else: time_str=strftime(\"%Y-%m-%d_%H:%M:%S\", gmtime()) print \"Unknown person", "print \"Errors occured !\" print e class FullscreenWindow: def __init__(self, parent, *args, **kwargs):", "import os import cv2 from PyQt4.QtCore import * from PyQt4.QtGui import * from", "self.video_stream_label.setAlignment(Qt.AlignCenter) self.video_stream_label.setFont(font2) self.face_image = QLabel() self.face_image.setScaledContents(True) self.face_image.setFixedSize(600,450) self.face_image.setAlignment(Qt.AlignRight) self.face_image.setPixmap(QPixmap(placeholder_image)) self.face_image_label=QLabel('Last Capture Results') self.face_image_label.setAlignment(Qt.AlignCenter)", "= 0 font1 = QFont('Helvetica', small_text_size) font2 = QFont('Helvetica', medium_text_size) font3 = QFont('Helvetica',", "[] persons_cnt=0 detected_persons_cnt=0 for (x, y, w, h) in faces: if w*h>500: persons_cnt+=1", "self.qt.setPalette(self.qt.pal) self.bg_color=0 self.qt.hbox4 = QHBoxLayout() self.qt.Dynamicframe = DynamicFrame(self.qt) self.qt.hbox4.addWidget(self.qt.Dynamicframe) self.qt.vbox = QVBoxLayout() self.qt.vbox.addLayout(self.qt.hbox4)", "= %d and date = '%s' \" % (int(userid), now.strftime(\"%Y-%m-%d\")) #print comm res2=cursor.execute(comm)", "+ ', ' welcome_names=welcome_names[:-2] welcome_names=welcome_names + ' & ' + detected_persons[detected_persons_cnt-1] elif detected_persons_cnt>0:", "\"Errors occured !\" print e class FullscreenWindow: def __init__(self, parent, *args, **kwargs): self.qt", "comm = \"SELECT * FROM %s WHERE personid = '%s'\" % (TABLE_NAME,detected_personid) res", "= \"INSERT INTO users_present (id, userid) VALUES (NULL,%d)\" %(int(userid)) #print format_str conn.execute(format_str) conn.commit()", "file_name = id_generator()+'.jpg' file = os.path.join(tmp_path,file_name) cloudinary_url=cloudinary_tmp + '/' + file_name cv2.imwrite(file, image_crop)", "1,(0,255,0),1) else: cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0,", "20, 20) self.hbox2.setSpacing(10) self.label1 = QLabel('Real-Time Face Recognition based Surveillance') self.label2 = QLabel('')", "parent, *args, **kwargs): self.qt = QWidget() self.qt.showFullScreen() self.qt.pal=QPalette() self.qt.pal.setColor(QPalette.Background,QColor(0,0,0)) self.qt.pal.setColor(QPalette.Foreground,QColor(255,255,255)) self.qt.setPalette(self.qt.pal) self.bg_color=0 self.qt.hbox4", "if res2==None: format_str = \"INSERT INTO users_present (id, userid) VALUES (NULL,%d)\" %(int(userid)) #print", "coding: utf-8 -*- import sys import os import cv2 from PyQt4.QtCore import *", "__init__(self, parent, *args, **kwargs): super(DynamicFrame, self).__init__() self.initUI() self.counter=0 self.capture_cnt=0 self.capture = cv2.VideoCapture(camera_port) def", "def update_check(self): self.video_timer = QTimer(self) self.video_timer.timeout.connect(self.display_video_stream) self.video_timer.start(capture_interval) def display_video_stream(self): ret,frame = self.capture.read() gray", "else: time_str=strftime(\"%Y-%m-%d_%H:%M:%S\", gmtime()) print \"Unknown person found\" 
cv2.imwrite(os.path.join(unknown_user_path,'cam1_'+time_str+'.jpg'),image_crop) persons.append('Unknown') if detected_persons_cnt > 1:", "for (x, y, w, h) in faces: if persons[k]!='Unknown': cv2.rectangle(frame, (x, y), (x", "files in os.walk(tmp_path): for f in files: os.unlink(os.path.join(root, f)) for d in dirs:", "print \"Result for person %d \" % persons_cnt print \"Image File = \"", "else print \"Attendance already marked for user %s \" % uname else: time_str=strftime(\"%Y-%m-%d_%H:%M:%S\",", "(0, 255, 0), 2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,255,0),1) else: cv2.rectangle(frame, (x,", "as msface large_text_size = 22 medium_text_size = 14 small_text_size = 10 base_path =", "print \"Face identification started ..........\" cv2.destroyAllWindows() try: for i in xrange(ramp_frames): s, im", "res: userid = res[0] uname = res[1] fname = res[2] lname = res[3]", "in faces: if w*h>500: persons_cnt+=1 image_crop = frame[y:y+h,x:x+w] #self.message_label.setText(\"Processing.. %d \" % persons_cnt)", "* import datetime import string import random import shutil from time import gmtime,", "for _ in range(size)) def make_dir(path): try: os.makedirs(path) except OSError: if not os.path.isdir(path):", "capture in %d seconds' % int((detection_interval-self.counter*capture_interval)/1000)) if self.counter==int(detection_interval/capture_interval): self.message_label.setText('Face identification started ...') self.update_dynamic_frame()", "res = cursor.execute(comm) res = cursor.fetchone() if res: userid = res[0] uname =", "= cv2.CascadeClassifier(cascPath) # function to generate a random id for image file name", "\" % persons_cnt) file_name = id_generator()+'.jpg' file = os.path.join(tmp_path,file_name) cloudinary_url=cloudinary_tmp + '/' +", "(0, 255, 0), 2) frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frame = cv2.flip(frame, 1) image", "a random id for image file name def id_generator(size=20, chars=string.ascii_lowercase + string.digits +", "users_present (id, userid) VALUES (NULL,%d)\" %(int(userid)) #print format_str conn.execute(format_str) conn.commit() print \"Attendance marked", "shutil.rmtree(os.path.join(root, d)) a = QApplication(sys.argv) w = FullscreenWindow(a) sys.exit(a.exec_()) # command to terminate", "self.vbox2.addWidget(self.face_image_label) self.hbox=QHBoxLayout() self.hbox.addLayout(self.vbox1) self.hbox.addLayout(self.vbox2) self.hbox.setAlignment(Qt.AlignCenter) self.hbox.setSpacing(20) self.hbox2=QHBoxLayout() self.hbox2.setAlignment(Qt.AlignCenter) self.message_label=QLabel('message') self.message_label.setAlignment(Qt.AlignCenter) self.message_label.setFont(font2) self.hbox2.addWidget(self.message_label) self.hbox2.setContentsMargins(20,", "uname = res[1] fname = res[2] lname = res[3] print \"Welcome %s !\"", "TABLE_NAME=\"users\" cascPath = 'haarcascade_frontalface_default.xml' faceCascade = cv2.CascadeClassifier(cascPath) # function to generate a random", "= os.path.join(base_path,'placeholder_600x400.svg') db_path = os.path.join(base_path,'users.db') cloudinary_dataset = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/dataset' cloudinary_tmp = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/tmp' current_userid =", "detected_persons = [] persons = [] persons_cnt=0 detected_persons_cnt=0 for (x, y, w, h)", "f in files: os.unlink(os.path.join(root, f)) for d in dirs: shutil.rmtree(os.path.join(root, d)) a =", "File = \" + str(file) print \"faceId = \" + str(faceid) detected_personid =", "= 
datetime.datetime.now() comm = \"SELECT * FROM users_present WHERE userid = %d and", "\"No person in image\" k=0 for (x, y, w, h) in faces: if", "in faces: if persons[k]!='Unknown': cv2.rectangle(frame, (x, y), (x + w, y + h),", "self.setLayout(self.vbox) self.update_check() def stop_capture(self): if self.capturing: self.capturing = False self.capture.release() self.timer.stop() cv2.destroyAllWindows() def", "-*- import sys import os import cv2 from PyQt4.QtCore import * from PyQt4.QtGui", "self.update_dynamic_frame() self.counter=0 else: self.counter=self.counter+1 def update_dynamic_frame(self): global current_userid global current_userfname detected_personid = ''", "= self.capture.read() ret,frame = self.capture.read() #self.message_label.setText('Image Captured') self.capture_cnt+=1 gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces", "super(DynamicFrame, self).__init__() self.initUI() self.counter=0 self.capture_cnt=0 self.capture = cv2.VideoCapture(camera_port) def initUI(self): self.video_stream = QLabel()", "image capture in %d seconds' % int((detection_interval-self.counter*capture_interval)/1000)) if self.counter==int(detection_interval/capture_interval): self.message_label.setText('Face identification started ...')", "persons[k]!='Unknown': cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),", "users_present WHERE userid = %d and date = '%s' \" % (int(userid), now.strftime(\"%Y-%m-%d\"))", "20, 20, 20) self.fbox.addRow(self.label1) self.fbox.addRow(self.label2) self.vbox = QVBoxLayout() self.vbox.addLayout(self.fbox) self.vbox.addLayout(self.hbox) self.vbox.addLayout(self.hbox2) self.vbox.setAlignment(Qt.AlignCenter) self.setLayout(self.vbox)", "'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/tmp' current_userid = 0 current_userfname = '' detection_interval=10000 capture_interval=30 camera_port = 0 font1", "frame.shape[0], frame.strides[0], QImage.Format_RGB888) self.video_stream.setPixmap(QPixmap.fromImage(image)) self.message_label.setText('Next image capture in %d seconds' % int((detection_interval-self.counter*capture_interval)/1000)) if", "= \" + str(faceid) detected_personid = msface.face_identify(faceid) if detected_personid: print \"detected_personid = \"", "Exception as e: print \"Errors occured !\" print e class FullscreenWindow: def __init__(self,", "print \"Unknown person found\" cv2.imwrite(os.path.join(unknown_user_path,'cam1_'+time_str+'.jpg'),image_crop) persons.append('Unknown') if detected_persons_cnt > 1: for i in", "Recognition based Surveillance') self.label2 = QLabel('') self.label1.setAlignment(Qt.AlignCenter) self.label2.setAlignment(Qt.AlignCenter) self.label1.setFont(font3) self.label2.setFont(font3) self.fbox = QFormLayout()", "+ detected_persons[detected_persons_cnt-1] elif detected_persons_cnt>0: welcome_names = detected_persons[0] self.label2.setText('Hello '+ welcome_names) else: self.label2.setText('') print", "self.face_image.setScaledContents(True) self.face_image.setFixedSize(600,450) self.face_image.setAlignment(Qt.AlignRight) self.face_image.setPixmap(QPixmap(placeholder_image)) self.face_image_label=QLabel('Last Capture Results') self.face_image_label.setAlignment(Qt.AlignCenter) self.face_image_label.setFont(font2) self.vbox1=QVBoxLayout() self.vbox1.addWidget(self.video_stream) self.vbox1.addWidget(self.video_stream_label) self.vbox2=QVBoxLayout()", "stop_capture(self): if self.capturing: self.capturing = False self.capture.release() self.timer.stop() cv2.destroyAllWindows() def update_check(self): self.video_timer 
=", "ramp_frames = 10 print \"Face identification started ..........\" cv2.destroyAllWindows() try: for i in", "+ string.digits + string.ascii_uppercase): return ''.join(random.choice(chars) for _ in range(size)) def make_dir(path): try:", "faces: if w*h>500: persons_cnt+=1 image_crop = frame[y:y+h,x:x+w] #self.message_label.setText(\"Processing.. %d \" % persons_cnt) file_name", "**kwargs): self.qt = QWidget() self.qt.showFullScreen() self.qt.pal=QPalette() self.qt.pal.setColor(QPalette.Background,QColor(0,0,0)) self.qt.pal.setColor(QPalette.Foreground,QColor(255,255,255)) self.qt.setPalette(self.qt.pal) self.bg_color=0 self.qt.hbox4 = QHBoxLayout()", "2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,255,0),1) else: cv2.rectangle(frame, (x, y), (x +", "in %d seconds' % int((detection_interval-self.counter*capture_interval)/1000)) if self.counter==int(detection_interval/capture_interval): self.message_label.setText('Face identification started ...') self.update_dynamic_frame() self.counter=0", "QImage(image, image.shape[1], image.shape[0], image.strides[0], QImage.Format_RGB888) self.face_image.setPixmap(QPixmap.fromImage(face_image)) except Exception as e: print \"Errors occured", "occured !\" print e class FullscreenWindow: def __init__(self, parent, *args, **kwargs): self.qt =", "0, 255), 2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,0,255),1) k=k+1 #image=cv2.flip(frame, 1) image", "MSFaceAPI.py for msface api calls import MSFaceAPI as msface large_text_size = 22 medium_text_size", "def update_dynamic_frame(self): global current_userid global current_userfname detected_personid = '' welcome_names='' ramp_frames = 10", "res[1] fname = res[2] lname = res[3] print \"Welcome %s !\" % (fname+'", "os.path.join(tmp_path,file_name) cloudinary_url=cloudinary_tmp + '/' + file_name cv2.imwrite(file, image_crop) imup.upload_image(file,file_name) faceid=msface.face_detect(cloudinary_url) print \"Result for", "= 14 small_text_size = 10 base_path = os.path.dirname(os.path.realpath(__file__)) dataset_path = os.path.join(base_path,'dataset') unknown_user_path=os.path.join(base_path,'unknowns') tmp_path", "!\" print e class FullscreenWindow: def __init__(self, parent, *args, **kwargs): self.qt = QWidget()", "!\" % (fname+' '+lname) detected_persons_cnt+=1 detected_persons.append(fname) persons.append(fname) now = datetime.datetime.now() comm = \"SELECT", "based Surveillance') self.label2 = QLabel('') self.label1.setAlignment(Qt.AlignCenter) self.label2.setAlignment(Qt.AlignCenter) self.label1.setFont(font3) self.label2.setFont(font3) self.fbox = QFormLayout() self.fbox.setAlignment(Qt.AlignCenter)", "self.face_image.setAlignment(Qt.AlignRight) self.face_image.setPixmap(QPixmap(placeholder_image)) self.face_image_label=QLabel('Last Capture Results') self.face_image_label.setAlignment(Qt.AlignCenter) self.face_image_label.setFont(font2) self.vbox1=QVBoxLayout() self.vbox1.addWidget(self.video_stream) self.vbox1.addWidget(self.video_stream_label) self.vbox2=QVBoxLayout() self.vbox2.addWidget(self.face_image) self.vbox2.addWidget(self.face_image_label)", "identification started ...') self.update_dynamic_frame() self.counter=0 else: self.counter=self.counter+1 def update_dynamic_frame(self): global current_userid global current_userfname", "Results') self.face_image_label.setAlignment(Qt.AlignCenter) self.face_image_label.setFont(font2) self.vbox1=QVBoxLayout() self.vbox1.addWidget(self.video_stream) 
self.vbox1.addWidget(self.video_stream_label) self.vbox2=QVBoxLayout() self.vbox2.addWidget(self.face_image) self.vbox2.addWidget(self.face_image_label) self.hbox=QHBoxLayout() self.hbox.addLayout(self.vbox1) self.hbox.addLayout(self.vbox2) self.hbox.setAlignment(Qt.AlignCenter)", "flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) print \"Total Faces in Image = %d \" % len(faces) #self.message_label.setText(\"Total", "% (TABLE_NAME,detected_personid) res = cursor.execute(comm) res = cursor.fetchone() if res: userid = res[0]", "welcome_names=welcome_names[:-2] welcome_names=welcome_names + ' & ' + detected_persons[detected_persons_cnt-1] elif detected_persons_cnt>0: welcome_names = detected_persons[0]", "detected_personid = msface.face_identify(faceid) if detected_personid: print \"detected_personid = \" + str(detected_personid) comm =", "int((detection_interval-self.counter*capture_interval)/1000)) if self.counter==int(detection_interval/capture_interval): self.message_label.setText('Face identification started ...') self.update_dynamic_frame() self.counter=0 else: self.counter=self.counter+1 def update_dynamic_frame(self):", "= False self.capture.release() self.timer.stop() cv2.destroyAllWindows() def update_check(self): self.video_timer = QTimer(self) self.video_timer.timeout.connect(self.display_video_stream) self.video_timer.start(capture_interval) def", "len(faces) > 0: detected_persons = [] persons = [] persons_cnt=0 detected_persons_cnt=0 for (x,", "'+lname) detected_persons_cnt+=1 detected_persons.append(fname) persons.append(fname) now = datetime.datetime.now() comm = \"SELECT * FROM users_present", "DynamicFrame(QWidget): def __init__(self, parent, *args, **kwargs): super(DynamicFrame, self).__init__() self.initUI() self.counter=0 self.capture_cnt=0 self.capture =", "for user %s \" % uname else: time_str=strftime(\"%Y-%m-%d_%H:%M:%S\", gmtime()) print \"Unknown person found\"", "*args, **kwargs): super(DynamicFrame, self).__init__() self.initUI() self.counter=0 self.capture_cnt=0 self.capture = cv2.VideoCapture(camera_port) def initUI(self): self.video_stream", "function to generate a random id for image file name def id_generator(size=20, chars=string.ascii_lowercase", "self.qt.Dynamicframe = DynamicFrame(self.qt) self.qt.hbox4.addWidget(self.qt.Dynamicframe) self.qt.vbox = QVBoxLayout() self.qt.vbox.addLayout(self.qt.hbox4) self.qt.setLayout(self.qt.vbox) if __name__ == '__main__':", "gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE", "cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frame = cv2.flip(frame, 1) image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)", "print \"Image File = \" + str(file) print \"faceId = \" + str(faceid)", "minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) for (x, y, w, h) in faces: cv2.rectangle(frame, (x,", "import * import datetime import string import random import shutil from time import", "__init__(self, parent, *args, **kwargs): self.qt = QWidget() self.qt.showFullScreen() self.qt.pal=QPalette() self.qt.pal.setColor(QPalette.Background,QColor(0,0,0)) self.qt.pal.setColor(QPalette.Foreground,QColor(255,255,255)) self.qt.setPalette(self.qt.pal) self.bg_color=0", "self.label2.setFont(font3) self.fbox = QFormLayout() self.fbox.setAlignment(Qt.AlignCenter) self.fbox.setContentsMargins(20, 20, 20, 20) self.fbox.addRow(self.label1) self.fbox.addRow(self.label2) self.vbox =", "self.capture.release() 
self.timer.stop() cv2.destroyAllWindows() def update_check(self): self.video_timer = QTimer(self) self.video_timer.timeout.connect(self.display_video_stream) self.video_timer.start(capture_interval) def display_video_stream(self): ret,frame", "from time import gmtime, strftime, sleep import sqlite3 # import imageUpload.py for uploading", "% uname else print \"Attendance already marked for user %s \" % uname", "range(size)) def make_dir(path): try: os.makedirs(path) except OSError: if not os.path.isdir(path): raise class DynamicFrame(QWidget):", "QLabel() self.video_stream.setScaledContents(True) self.video_stream.setAlignment(Qt.AlignLeft) self.video_stream.setFixedSize(600,450) self.video_stream_label=QLabel('Live Video Stream') self.video_stream_label.setAlignment(Qt.AlignCenter) self.video_stream_label.setFont(font2) self.face_image = QLabel() self.face_image.setScaledContents(True)", "already marked for user %s \" % uname else: time_str=strftime(\"%Y-%m-%d_%H:%M:%S\", gmtime()) print \"Unknown", "str(detected_personid) comm = \"SELECT * FROM %s WHERE personid = '%s'\" % (TABLE_NAME,detected_personid)", "self.fbox.addRow(self.label1) self.fbox.addRow(self.label2) self.vbox = QVBoxLayout() self.vbox.addLayout(self.fbox) self.vbox.addLayout(self.hbox) self.vbox.addLayout(self.hbox2) self.vbox.setAlignment(Qt.AlignCenter) self.setLayout(self.vbox) self.update_check() def stop_capture(self):", "imageUpload as imup # import MSFaceAPI.py for msface api calls import MSFaceAPI as", "QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888) self.video_stream.setPixmap(QPixmap.fromImage(image)) self.message_label.setText('Next image capture in %d seconds' %", "%s \" % uname else: time_str=strftime(\"%Y-%m-%d_%H:%M:%S\", gmtime()) print \"Unknown person found\" cv2.imwrite(os.path.join(unknown_user_path,'cam1_'+time_str+'.jpg'),image_crop) persons.append('Unknown')", "user %s \" % uname else print \"Attendance already marked for user %s", "2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,0,255),1) k=k+1 #image=cv2.flip(frame, 1) image = cv2.cvtColor(frame,", "w, h) in faces: if persons[k]!='Unknown': cv2.rectangle(frame, (x, y), (x + w, y", "scaleFactor=1.1, minNeighbors=5, minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) print \"Total Faces in Image = %d", "file = os.path.join(tmp_path,file_name) cloudinary_url=cloudinary_tmp + '/' + file_name cv2.imwrite(file, image_crop) imup.upload_image(file,file_name) faceid=msface.face_detect(cloudinary_url) print", "persons.append('Unknown') if detected_persons_cnt > 1: for i in range(detected_persons_cnt-1): welcome_names = welcome_names +", "self.face_image_label.setAlignment(Qt.AlignCenter) self.face_image_label.setFont(font2) self.vbox1=QVBoxLayout() self.vbox1.addWidget(self.video_stream) self.vbox1.addWidget(self.video_stream_label) self.vbox2=QVBoxLayout() self.vbox2.addWidget(self.face_image) self.vbox2.addWidget(self.face_image_label) self.hbox=QHBoxLayout() self.hbox.addLayout(self.vbox1) self.hbox.addLayout(self.vbox2) self.hbox.setAlignment(Qt.AlignCenter) self.hbox.setSpacing(20)", "font3 = QFont('Helvetica', large_text_size) conn = sqlite3.connect(db_path) cursor = conn.cursor() TABLE_NAME=\"users\" cascPath =", "self.face_image = QLabel() self.face_image.setScaledContents(True) self.face_image.setFixedSize(600,450) self.face_image.setAlignment(Qt.AlignRight) self.face_image.setPixmap(QPixmap(placeholder_image)) self.face_image_label=QLabel('Last Capture Results') 
self.face_image_label.setAlignment(Qt.AlignCenter) self.face_image_label.setFont(font2) self.vbox1=QVBoxLayout()", "uname else print \"Attendance already marked for user %s \" % uname else:", "imup # import MSFaceAPI.py for msface api calls import MSFaceAPI as msface large_text_size", "sleep import sqlite3 # import imageUpload.py for uploading captured images to cloudinary import", "% len(faces) #self.message_label.setText(\"Total Faces in Image = %d \" % len(faces)) if len(faces)", "def make_dir(path): try: os.makedirs(path) except OSError: if not os.path.isdir(path): raise class DynamicFrame(QWidget): def", "(fname+' '+lname) detected_persons_cnt+=1 detected_persons.append(fname) persons.append(fname) now = datetime.datetime.now() comm = \"SELECT * FROM", "QVBoxLayout() self.vbox.addLayout(self.fbox) self.vbox.addLayout(self.hbox) self.vbox.addLayout(self.hbox2) self.vbox.setAlignment(Qt.AlignCenter) self.setLayout(self.vbox) self.update_check() def stop_capture(self): if self.capturing: self.capturing =", "#self.message_label.setText(\"Processing.. %d \" % persons_cnt) file_name = id_generator()+'.jpg' file = os.path.join(tmp_path,file_name) cloudinary_url=cloudinary_tmp +", "for i in range(detected_persons_cnt-1): welcome_names = welcome_names + detected_persons[i] + ', ' welcome_names=welcome_names[:-2]", "_ in range(size)) def make_dir(path): try: os.makedirs(path) except OSError: if not os.path.isdir(path): raise", "h) in faces: cv2.rectangle(frame, (x, y), (x + w, y + h), (0,", "self.hbox2.setAlignment(Qt.AlignCenter) self.message_label=QLabel('message') self.message_label.setAlignment(Qt.AlignCenter) self.message_label.setFont(font2) self.hbox2.addWidget(self.message_label) self.hbox2.setContentsMargins(20, 20, 20, 20) self.hbox2.setSpacing(10) self.label1 = QLabel('Real-Time", "for (x, y, w, h) in faces: cv2.rectangle(frame, (x, y), (x + w,", "= 10 print \"Face identification started ..........\" cv2.destroyAllWindows() try: for i in xrange(ramp_frames):", "self.message_label.setFont(font2) self.hbox2.addWidget(self.message_label) self.hbox2.setContentsMargins(20, 20, 20, 20) self.hbox2.setSpacing(10) self.label1 = QLabel('Real-Time Face Recognition based", "parent, *args, **kwargs): super(DynamicFrame, self).__init__() self.initUI() self.counter=0 self.capture_cnt=0 self.capture = cv2.VideoCapture(camera_port) def initUI(self):", "datetime.datetime.now() comm = \"SELECT * FROM users_present WHERE userid = %d and date", "%d \" % persons_cnt print \"Image File = \" + str(file) print \"faceId", "= self.capture.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(80,", "conn.commit() print \"Attendance marked for user %s \" % uname else print \"Attendance", "(x + w, y + h), (0, 255, 0), 2) frame = cv2.cvtColor(frame,", "+ '/' + file_name cv2.imwrite(file, image_crop) imup.upload_image(file,file_name) faceid=msface.face_detect(cloudinary_url) print \"Result for person %d", "= cursor.execute(comm) res = cursor.fetchone() if res: userid = res[0] uname = res[1]", "sqlite3.connect(db_path) cursor = conn.cursor() TABLE_NAME=\"users\" cascPath = 'haarcascade_frontalface_default.xml' faceCascade = cv2.CascadeClassifier(cascPath) # function", "'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/dataset' cloudinary_tmp = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/tmp' current_userid = 0 current_userfname = '' detection_interval=10000 capture_interval=30 camera_port", "' 
welcome_names=welcome_names[:-2] welcome_names=welcome_names + ' & ' + detected_persons[detected_persons_cnt-1] elif detected_persons_cnt>0: welcome_names =", "% persons_cnt print \"Image File = \" + str(file) print \"faceId = \"", "welcome_names = detected_persons[0] self.label2.setText('Hello '+ welcome_names) else: self.label2.setText('') print \"No person in image\"", "20, 20, 20) self.hbox2.setSpacing(10) self.label1 = QLabel('Real-Time Face Recognition based Surveillance') self.label2 =", "cv2.destroyAllWindows() def update_check(self): self.video_timer = QTimer(self) self.video_timer.timeout.connect(self.display_video_stream) self.video_timer.start(capture_interval) def display_video_stream(self): ret,frame = self.capture.read()", "(NULL,%d)\" %(int(userid)) #print format_str conn.execute(format_str) conn.commit() print \"Attendance marked for user %s \"", "cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)", "marked for user %s \" % uname else print \"Attendance already marked for", "cv2.imwrite(os.path.join(unknown_user_path,'cam1_'+time_str+'.jpg'),image_crop) persons.append('Unknown') if detected_persons_cnt > 1: for i in range(detected_persons_cnt-1): welcome_names = welcome_names", "= '' welcome_names='' ramp_frames = 10 print \"Face identification started ..........\" cv2.destroyAllWindows() try:", "fname = res[2] lname = res[3] print \"Welcome %s !\" % (fname+' '+lname)", "self.initUI() self.counter=0 self.capture_cnt=0 self.capture = cv2.VideoCapture(camera_port) def initUI(self): self.video_stream = QLabel() self.video_stream.setScaledContents(True) self.video_stream.setAlignment(Qt.AlignLeft)", "str(file) print \"faceId = \" + str(faceid) detected_personid = msface.face_identify(faceid) if detected_personid: print", "conn.execute(format_str) conn.commit() print \"Attendance marked for user %s \" % uname else print", "image.shape[1], image.shape[0], image.strides[0], QImage.Format_RGB888) self.face_image.setPixmap(QPixmap.fromImage(face_image)) except Exception as e: print \"Errors occured !\"", "self.label2.setText('') print \"No person in image\" k=0 for (x, y, w, h) in", "res[2] lname = res[3] print \"Welcome %s !\" % (fname+' '+lname) detected_persons_cnt+=1 detected_persons.append(fname)", "= QFont('Helvetica', large_text_size) conn = sqlite3.connect(db_path) cursor = conn.cursor() TABLE_NAME=\"users\" cascPath = 'haarcascade_frontalface_default.xml'", "*args, **kwargs): self.qt = QWidget() self.qt.showFullScreen() self.qt.pal=QPalette() self.qt.pal.setColor(QPalette.Background,QColor(0,0,0)) self.qt.pal.setColor(QPalette.Foreground,QColor(255,255,255)) self.qt.setPalette(self.qt.pal) self.bg_color=0 self.qt.hbox4 =", "seconds' % int((detection_interval-self.counter*capture_interval)/1000)) if self.counter==int(detection_interval/capture_interval): self.message_label.setText('Face identification started ...') self.update_dynamic_frame() self.counter=0 else: self.counter=self.counter+1", "welcome_names) else: self.label2.setText('') print \"No person in image\" k=0 for (x, y, w,", "os.walk(tmp_path): for f in files: os.unlink(os.path.join(root, f)) for d in dirs: shutil.rmtree(os.path.join(root, d))", "elif detected_persons_cnt>0: welcome_names = detected_persons[0] self.label2.setText('Hello '+ welcome_names) else: self.label2.setText('') print \"No person", "return ''.join(random.choice(chars) for _ in range(size)) def make_dir(path): try: os.makedirs(path) except OSError: if", "self.fbox.setAlignment(Qt.AlignCenter) self.fbox.setContentsMargins(20, 20, 20, 
20) self.fbox.addRow(self.label1) self.fbox.addRow(self.label2) self.vbox = QVBoxLayout() self.vbox.addLayout(self.fbox) self.vbox.addLayout(self.hbox) self.vbox.addLayout(self.hbox2)", "= '%s' \" % (int(userid), now.strftime(\"%Y-%m-%d\")) #print comm res2=cursor.execute(comm) res2=cursor.fetchone() if res2==None: format_str", "self.capture.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(80, 80),", "self.video_stream.setAlignment(Qt.AlignLeft) self.video_stream.setFixedSize(600,450) self.video_stream_label=QLabel('Live Video Stream') self.video_stream_label.setAlignment(Qt.AlignCenter) self.video_stream_label.setFont(font2) self.face_image = QLabel() self.face_image.setScaledContents(True) self.face_image.setFixedSize(600,450) self.face_image.setAlignment(Qt.AlignRight)", "= cursor.fetchone() if res: userid = res[0] uname = res[1] fname = res[2]", "make_dir(unknown_user_path) for root, dirs, files in os.walk(tmp_path): for f in files: os.unlink(os.path.join(root, f))", "w*h>500: persons_cnt+=1 image_crop = frame[y:y+h,x:x+w] #self.message_label.setText(\"Processing.. %d \" % persons_cnt) file_name = id_generator()+'.jpg'", "faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) print \"Total Faces in Image", "= conn.cursor() TABLE_NAME=\"users\" cascPath = 'haarcascade_frontalface_default.xml' faceCascade = cv2.CascadeClassifier(cascPath) # function to generate", "% uname else: time_str=strftime(\"%Y-%m-%d_%H:%M:%S\", gmtime()) print \"Unknown person found\" cv2.imwrite(os.path.join(unknown_user_path,'cam1_'+time_str+'.jpg'),image_crop) persons.append('Unknown') if detected_persons_cnt", "self.label2 = QLabel('') self.label1.setAlignment(Qt.AlignCenter) self.label2.setAlignment(Qt.AlignCenter) self.label1.setFont(font3) self.label2.setFont(font3) self.fbox = QFormLayout() self.fbox.setAlignment(Qt.AlignCenter) self.fbox.setContentsMargins(20, 20,", "dataset_path = os.path.join(base_path,'dataset') unknown_user_path=os.path.join(base_path,'unknowns') tmp_path = os.path.join(base_path,'tmp') placeholder_image = os.path.join(base_path,'placeholder_600x400.svg') db_path = os.path.join(base_path,'users.db')", "(x, y, w, h) in faces: cv2.rectangle(frame, (x, y), (x + w, y", "PyQt4.QtGui import * from PyQt4.QtWebKit import * import datetime import string import random", "frame = cv2.flip(frame, 1) image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888) self.video_stream.setPixmap(QPixmap.fromImage(image)) self.message_label.setText('Next", "= '%s'\" % (TABLE_NAME,detected_personid) res = cursor.execute(comm) res = cursor.fetchone() if res: userid", "userid) VALUES (NULL,%d)\" %(int(userid)) #print format_str conn.execute(format_str) conn.commit() print \"Attendance marked for user", "* from PyQt4.QtGui import * from PyQt4.QtWebKit import * import datetime import string", "* FROM %s WHERE personid = '%s'\" % (TABLE_NAME,detected_personid) res = cursor.execute(comm) res", "= self.capture.read() #self.message_label.setText('Image Captured') self.capture_cnt+=1 gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray,", "cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) print \"Total", "' & ' + detected_persons[detected_persons_cnt-1] elif detected_persons_cnt>0: 
welcome_names = detected_persons[0] self.label2.setText('Hello '+ welcome_names)", "for d in dirs: shutil.rmtree(os.path.join(root, d)) a = QApplication(sys.argv) w = FullscreenWindow(a) sys.exit(a.exec_())", "detection_interval=10000 capture_interval=30 camera_port = 0 font1 = QFont('Helvetica', small_text_size) font2 = QFont('Helvetica', medium_text_size)", "h) in faces: if w*h>500: persons_cnt+=1 image_crop = frame[y:y+h,x:x+w] #self.message_label.setText(\"Processing.. %d \" %", "self.hbox.addLayout(self.vbox1) self.hbox.addLayout(self.vbox2) self.hbox.setAlignment(Qt.AlignCenter) self.hbox.setSpacing(20) self.hbox2=QHBoxLayout() self.hbox2.setAlignment(Qt.AlignCenter) self.message_label=QLabel('message') self.message_label.setAlignment(Qt.AlignCenter) self.message_label.setFont(font2) self.hbox2.addWidget(self.message_label) self.hbox2.setContentsMargins(20, 20, 20,", "> 0: detected_persons = [] persons = [] persons_cnt=0 detected_persons_cnt=0 for (x, y,", "import MSFaceAPI.py for msface api calls import MSFaceAPI as msface large_text_size = 22", "image_crop) imup.upload_image(file,file_name) faceid=msface.face_detect(cloudinary_url) print \"Result for person %d \" % persons_cnt print \"Image", "cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,255,0),1) else: cv2.rectangle(frame, (x, y), (x + w,", "imup.upload_image(file,file_name) faceid=msface.face_detect(cloudinary_url) print \"Result for person %d \" % persons_cnt print \"Image File", "80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) for (x, y, w, h) in faces: cv2.rectangle(frame, (x, y),", "image.shape[0], image.strides[0], QImage.Format_RGB888) self.face_image.setPixmap(QPixmap.fromImage(face_image)) except Exception as e: print \"Errors occured !\" print", "if detected_personid: print \"detected_personid = \" + str(detected_personid) comm = \"SELECT * FROM", "import sys import os import cv2 from PyQt4.QtCore import * from PyQt4.QtGui import", "h) in faces: if persons[k]!='Unknown': cv2.rectangle(frame, (x, y), (x + w, y +", "y, w, h) in faces: cv2.rectangle(frame, (x, y), (x + w, y +", "h), (0, 0, 255), 2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,0,255),1) k=k+1 #image=cv2.flip(frame,", "class FullscreenWindow: def __init__(self, parent, *args, **kwargs): self.qt = QWidget() self.qt.showFullScreen() self.qt.pal=QPalette() self.qt.pal.setColor(QPalette.Background,QColor(0,0,0))", "conn.cursor() TABLE_NAME=\"users\" cascPath = 'haarcascade_frontalface_default.xml' faceCascade = cv2.CascadeClassifier(cascPath) # function to generate a", "= res[1] fname = res[2] lname = res[3] print \"Welcome %s !\" %", "y), (x + w, y + h), (0, 0, 255), 2) cv2.putText(frame, persons[k],(x,", "= cv2.flip(frame, 1) image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888) self.video_stream.setPixmap(QPixmap.fromImage(image)) self.message_label.setText('Next image", "sys import os import cv2 from PyQt4.QtCore import * from PyQt4.QtGui import *", "(x, y), (x + w, y + h), (0, 0, 255), 2) cv2.putText(frame,", "= [] persons = [] persons_cnt=0 detected_persons_cnt=0 for (x, y, w, h) in", "(x, y), (x + w, y + h), (0, 255, 0), 2) cv2.putText(frame,", "self.video_stream.setScaledContents(True) self.video_stream.setAlignment(Qt.AlignLeft) self.video_stream.setFixedSize(600,450) self.video_stream_label=QLabel('Live Video Stream') self.video_stream_label.setAlignment(Qt.AlignCenter) self.video_stream_label.setFont(font2) self.face_image = QLabel() 
self.face_image.setScaledContents(True) self.face_image.setFixedSize(600,450)", "= faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) for (x, y, w,", "in os.walk(tmp_path): for f in files: os.unlink(os.path.join(root, f)) for d in dirs: shutil.rmtree(os.path.join(root,", "(TABLE_NAME,detected_personid) res = cursor.execute(comm) res = cursor.fetchone() if res: userid = res[0] uname", "self.label2.setText('Hello '+ welcome_names) else: self.label2.setText('') print \"No person in image\" k=0 for (x,", "captured images to cloudinary import imageUpload as imup # import MSFaceAPI.py for msface", "+ h), (0, 0, 255), 2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,0,255),1) k=k+1", "= QFormLayout() self.fbox.setAlignment(Qt.AlignCenter) self.fbox.setContentsMargins(20, 20, 20, 20) self.fbox.addRow(self.label1) self.fbox.addRow(self.label2) self.vbox = QVBoxLayout() self.vbox.addLayout(self.fbox)", "time_str=strftime(\"%Y-%m-%d_%H:%M:%S\", gmtime()) print \"Unknown person found\" cv2.imwrite(os.path.join(unknown_user_path,'cam1_'+time_str+'.jpg'),image_crop) persons.append('Unknown') if detected_persons_cnt > 1: for", "os import cv2 from PyQt4.QtCore import * from PyQt4.QtGui import * from PyQt4.QtWebKit", "y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,255,0),1) else: cv2.rectangle(frame, (x, y), (x + w, y +", "gray, scaleFactor=1.1, minNeighbors=5, minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) for (x, y, w, h) in", "frame[y:y+h,x:x+w] #self.message_label.setText(\"Processing.. %d \" % persons_cnt) file_name = id_generator()+'.jpg' file = os.path.join(tmp_path,file_name) cloudinary_url=cloudinary_tmp", "os.path.join(base_path,'tmp') placeholder_image = os.path.join(base_path,'placeholder_600x400.svg') db_path = os.path.join(base_path,'users.db') cloudinary_dataset = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/dataset' cloudinary_tmp = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/tmp'", "id for image file name def id_generator(size=20, chars=string.ascii_lowercase + string.digits + string.ascii_uppercase): return", "for uploading captured images to cloudinary import imageUpload as imup # import MSFaceAPI.py", "class DynamicFrame(QWidget): def __init__(self, parent, *args, **kwargs): super(DynamicFrame, self).__init__() self.initUI() self.counter=0 self.capture_cnt=0 self.capture", "res2=cursor.fetchone() if res2==None: format_str = \"INSERT INTO users_present (id, userid) VALUES (NULL,%d)\" %(int(userid))", "& ' + detected_persons[detected_persons_cnt-1] elif detected_persons_cnt>0: welcome_names = detected_persons[0] self.label2.setText('Hello '+ welcome_names) else:", "1) image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) face_image = QImage(image, image.shape[1], image.shape[0], image.strides[0], QImage.Format_RGB888) self.face_image.setPixmap(QPixmap.fromImage(face_image))", "personid = '%s'\" % (TABLE_NAME,detected_personid) res = cursor.execute(comm) res = cursor.fetchone() if res:", "faceCascade = cv2.CascadeClassifier(cascPath) # function to generate a random id for image file", "self.label1 = QLabel('Real-Time Face Recognition based Surveillance') self.label2 = QLabel('') self.label1.setAlignment(Qt.AlignCenter) self.label2.setAlignment(Qt.AlignCenter) self.label1.setFont(font3)", "self.face_image_label=QLabel('Last Capture Results') self.face_image_label.setAlignment(Qt.AlignCenter) self.face_image_label.setFont(font2) self.vbox1=QVBoxLayout() 
self.vbox1.addWidget(self.video_stream) self.vbox1.addWidget(self.video_stream_label) self.vbox2=QVBoxLayout() self.vbox2.addWidget(self.face_image) self.vbox2.addWidget(self.face_image_label) self.hbox=QHBoxLayout() self.hbox.addLayout(self.vbox1)", "* from PyQt4.QtWebKit import * import datetime import string import random import shutil", "print \"Attendance already marked for user %s \" % uname else: time_str=strftime(\"%Y-%m-%d_%H:%M:%S\", gmtime())", "cloudinary_tmp = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/tmp' current_userid = 0 current_userfname = '' detection_interval=10000 capture_interval=30 camera_port =", "persons_cnt=0 detected_persons_cnt=0 for (x, y, w, h) in faces: if w*h>500: persons_cnt+=1 image_crop", "y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,0,255),1) k=k+1 #image=cv2.flip(frame, 1) image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) face_image =", "(x + w, y + h), (0, 0, 255), 2) cv2.putText(frame, persons[k],(x, y-10),", "self.timer.stop() cv2.destroyAllWindows() def update_check(self): self.video_timer = QTimer(self) self.video_timer.timeout.connect(self.display_video_stream) self.video_timer.start(capture_interval) def display_video_stream(self): ret,frame =", "if detected_persons_cnt > 1: for i in range(detected_persons_cnt-1): welcome_names = welcome_names + detected_persons[i]", "y + h), (0, 0, 255), 2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,0,255),1)", "os.path.dirname(os.path.realpath(__file__)) dataset_path = os.path.join(base_path,'dataset') unknown_user_path=os.path.join(base_path,'unknowns') tmp_path = os.path.join(base_path,'tmp') placeholder_image = os.path.join(base_path,'placeholder_600x400.svg') db_path =", "= \" + str(detected_personid) comm = \"SELECT * FROM %s WHERE personid =", "self.qt.vbox = QVBoxLayout() self.qt.vbox.addLayout(self.qt.hbox4) self.qt.setLayout(self.qt.vbox) if __name__ == '__main__': make_dir(tmp_path) make_dir(unknown_user_path) for root,", "y + h), (0, 255, 0), 2) frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frame =", "cv2.CascadeClassifier(cascPath) # function to generate a random id for image file name def", "self.counter=0 else: self.counter=self.counter+1 def update_dynamic_frame(self): global current_userid global current_userfname detected_personid = '' welcome_names=''", "detected_personid: print \"detected_personid = \" + str(detected_personid) comm = \"SELECT * FROM %s", "'%s'\" % (TABLE_NAME,detected_personid) res = cursor.execute(comm) res = cursor.fetchone() if res: userid =", "cv2.VideoCapture(camera_port) def initUI(self): self.video_stream = QLabel() self.video_stream.setScaledContents(True) self.video_stream.setAlignment(Qt.AlignLeft) self.video_stream.setFixedSize(600,450) self.video_stream_label=QLabel('Live Video Stream') self.video_stream_label.setAlignment(Qt.AlignCenter)", "self.face_image.setPixmap(QPixmap.fromImage(face_image)) except Exception as e: print \"Errors occured !\" print e class FullscreenWindow:", "self.qt.hbox4.addWidget(self.qt.Dynamicframe) self.qt.vbox = QVBoxLayout() self.qt.vbox.addLayout(self.qt.hbox4) self.qt.setLayout(self.qt.vbox) if __name__ == '__main__': make_dir(tmp_path) make_dir(unknown_user_path) for", "Capture Results') self.face_image_label.setAlignment(Qt.AlignCenter) self.face_image_label.setFont(font2) self.vbox1=QVBoxLayout() self.vbox1.addWidget(self.video_stream) self.vbox1.addWidget(self.video_stream_label) self.vbox2=QVBoxLayout() self.vbox2.addWidget(self.face_image) 
self.vbox2.addWidget(self.face_image_label) self.hbox=QHBoxLayout() self.hbox.addLayout(self.vbox1) self.hbox.addLayout(self.vbox2)", "%d \" % len(faces)) if len(faces) > 0: detected_persons = [] persons =", "k=k+1 #image=cv2.flip(frame, 1) image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) face_image = QImage(image, image.shape[1], image.shape[0], image.strides[0],", "in xrange(ramp_frames): s, im = self.capture.read() ret,frame = self.capture.read() #self.message_label.setText('Image Captured') self.capture_cnt+=1 gray", "cv2.imwrite(file, image_crop) imup.upload_image(file,file_name) faceid=msface.face_detect(cloudinary_url) print \"Result for person %d \" % persons_cnt print", "= '' detection_interval=10000 capture_interval=30 camera_port = 0 font1 = QFont('Helvetica', small_text_size) font2 =", "self.video_timer.timeout.connect(self.display_video_stream) self.video_timer.start(capture_interval) def display_video_stream(self): ret,frame = self.capture.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces =", "= cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE )", "self.hbox2.addWidget(self.message_label) self.hbox2.setContentsMargins(20, 20, 20, 20) self.hbox2.setSpacing(10) self.label1 = QLabel('Real-Time Face Recognition based Surveillance')", "in faces: cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255,", "10 base_path = os.path.dirname(os.path.realpath(__file__)) dataset_path = os.path.join(base_path,'dataset') unknown_user_path=os.path.join(base_path,'unknowns') tmp_path = os.path.join(base_path,'tmp') placeholder_image =", "\" % persons_cnt print \"Image File = \" + str(file) print \"faceId =", "import * from PyQt4.QtWebKit import * import datetime import string import random import", "minNeighbors=5, minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) for (x, y, w, h) in faces: cv2.rectangle(frame,", "print \"Total Faces in Image = %d \" % len(faces) #self.message_label.setText(\"Total Faces in", "cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,0,255),1) k=k+1 #image=cv2.flip(frame, 1) image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) face_image = QImage(image,", "cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,0,255),1) k=k+1 #image=cv2.flip(frame, 1) image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)", "', ' welcome_names=welcome_names[:-2] welcome_names=welcome_names + ' & ' + detected_persons[detected_persons_cnt-1] elif detected_persons_cnt>0: welcome_names", "(int(userid), now.strftime(\"%Y-%m-%d\")) #print comm res2=cursor.execute(comm) res2=cursor.fetchone() if res2==None: format_str = \"INSERT INTO users_present", "self).__init__() self.initUI() self.counter=0 self.capture_cnt=0 self.capture = cv2.VideoCapture(camera_port) def initUI(self): self.video_stream = QLabel() self.video_stream.setScaledContents(True)", "== '__main__': make_dir(tmp_path) make_dir(unknown_user_path) for root, dirs, files in os.walk(tmp_path): for f in", "= os.path.join(base_path,'tmp') placeholder_image = os.path.join(base_path,'placeholder_600x400.svg') db_path = os.path.join(base_path,'users.db') cloudinary_dataset = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/dataset' cloudinary_tmp =", "h), (0, 255, 0), 2) frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frame = cv2.flip(frame, 1)", "self.capture_cnt=0 self.capture = cv2.VideoCapture(camera_port) def initUI(self): self.video_stream = QLabel() self.video_stream.setScaledContents(True) 
self.video_stream.setAlignment(Qt.AlignLeft) self.video_stream.setFixedSize(600,450) self.video_stream_label=QLabel('Live", "self.vbox1.addWidget(self.video_stream_label) self.vbox2=QVBoxLayout() self.vbox2.addWidget(self.face_image) self.vbox2.addWidget(self.face_image_label) self.hbox=QHBoxLayout() self.hbox.addLayout(self.vbox1) self.hbox.addLayout(self.vbox2) self.hbox.setAlignment(Qt.AlignCenter) self.hbox.setSpacing(20) self.hbox2=QHBoxLayout() self.hbox2.setAlignment(Qt.AlignCenter) self.message_label=QLabel('message') self.message_label.setAlignment(Qt.AlignCenter)", "frame.strides[0], QImage.Format_RGB888) self.video_stream.setPixmap(QPixmap.fromImage(image)) self.message_label.setText('Next image capture in %d seconds' % int((detection_interval-self.counter*capture_interval)/1000)) if self.counter==int(detection_interval/capture_interval):", "= res[2] lname = res[3] print \"Welcome %s !\" % (fname+' '+lname) detected_persons_cnt+=1", "= QTimer(self) self.video_timer.timeout.connect(self.display_video_stream) self.video_timer.start(capture_interval) def display_video_stream(self): ret,frame = self.capture.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)", "scaleFactor=1.1, minNeighbors=5, minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) for (x, y, w, h) in faces:", "self.message_label.setText('Next image capture in %d seconds' % int((detection_interval-self.counter*capture_interval)/1000)) if self.counter==int(detection_interval/capture_interval): self.message_label.setText('Face identification started", "print \"Attendance marked for user %s \" % uname else print \"Attendance already", "cursor.execute(comm) res = cursor.fetchone() if res: userid = res[0] uname = res[1] fname", "format_str = \"INSERT INTO users_present (id, userid) VALUES (NULL,%d)\" %(int(userid)) #print format_str conn.execute(format_str)", "gmtime()) print \"Unknown person found\" cv2.imwrite(os.path.join(unknown_user_path,'cam1_'+time_str+'.jpg'),image_crop) persons.append('Unknown') if detected_persons_cnt > 1: for i", "os.path.join(base_path,'dataset') unknown_user_path=os.path.join(base_path,'unknowns') tmp_path = os.path.join(base_path,'tmp') placeholder_image = os.path.join(base_path,'placeholder_600x400.svg') db_path = os.path.join(base_path,'users.db') cloudinary_dataset =", "(0, 0, 255), 2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,0,255),1) k=k+1 #image=cv2.flip(frame, 1)", "dirs, files in os.walk(tmp_path): for f in files: os.unlink(os.path.join(root, f)) for d in", "#!/usr/bin/python # -*- coding: utf-8 -*- import sys import os import cv2 from", "QLabel() self.face_image.setScaledContents(True) self.face_image.setFixedSize(600,450) self.face_image.setAlignment(Qt.AlignRight) self.face_image.setPixmap(QPixmap(placeholder_image)) self.face_image_label=QLabel('Last Capture Results') self.face_image_label.setAlignment(Qt.AlignCenter) self.face_image_label.setFont(font2) self.vbox1=QVBoxLayout() self.vbox1.addWidget(self.video_stream) self.vbox1.addWidget(self.video_stream_label)", "as imup # import MSFaceAPI.py for msface api calls import MSFaceAPI as msface", "= detected_persons[0] self.label2.setText('Hello '+ welcome_names) else: self.label2.setText('') print \"No person in image\" k=0", "= %d \" % len(faces) #self.message_label.setText(\"Total Faces in Image = %d \" %", "self.qt.vbox.addLayout(self.qt.hbox4) self.qt.setLayout(self.qt.vbox) if __name__ == '__main__': make_dir(tmp_path) make_dir(unknown_user_path) for root, dirs, files in", "#print 
format_str conn.execute(format_str) conn.commit() print \"Attendance marked for user %s \" % uname", "for root, dirs, files in os.walk(tmp_path): for f in files: os.unlink(os.path.join(root, f)) for", "medium_text_size) font3 = QFont('Helvetica', large_text_size) conn = sqlite3.connect(db_path) cursor = conn.cursor() TABLE_NAME=\"users\" cascPath", "y, w, h) in faces: if w*h>500: persons_cnt+=1 image_crop = frame[y:y+h,x:x+w] #self.message_label.setText(\"Processing.. %d", "image file name def id_generator(size=20, chars=string.ascii_lowercase + string.digits + string.ascii_uppercase): return ''.join(random.choice(chars) for", "= os.path.join(base_path,'users.db') cloudinary_dataset = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/dataset' cloudinary_tmp = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/tmp' current_userid = 0 current_userfname =", "...') self.update_dynamic_frame() self.counter=0 else: self.counter=self.counter+1 def update_dynamic_frame(self): global current_userid global current_userfname detected_personid =", "faceid=msface.face_detect(cloudinary_url) print \"Result for person %d \" % persons_cnt print \"Image File =", "self.qt.showFullScreen() self.qt.pal=QPalette() self.qt.pal.setColor(QPalette.Background,QColor(0,0,0)) self.qt.pal.setColor(QPalette.Foreground,QColor(255,255,255)) self.qt.setPalette(self.qt.pal) self.bg_color=0 self.qt.hbox4 = QHBoxLayout() self.qt.Dynamicframe = DynamicFrame(self.qt) self.qt.hbox4.addWidget(self.qt.Dynamicframe)", "+ str(file) print \"faceId = \" + str(faceid) detected_personid = msface.face_identify(faceid) if detected_personid:", "= cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) face_image = QImage(image, image.shape[1], image.shape[0], image.strides[0], QImage.Format_RGB888) self.face_image.setPixmap(QPixmap.fromImage(face_image)) except Exception", "False self.capture.release() self.timer.stop() cv2.destroyAllWindows() def update_check(self): self.video_timer = QTimer(self) self.video_timer.timeout.connect(self.display_video_stream) self.video_timer.start(capture_interval) def display_video_stream(self):", "\" % len(faces)) if len(faces) > 0: detected_persons = [] persons = []", "cloudinary_url=cloudinary_tmp + '/' + file_name cv2.imwrite(file, image_crop) imup.upload_image(file,file_name) faceid=msface.face_detect(cloudinary_url) print \"Result for person", "255, 0), 2) frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frame = cv2.flip(frame, 1) image =", "y, w, h) in faces: if persons[k]!='Unknown': cv2.rectangle(frame, (x, y), (x + w,", "faces = faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(80, 80), flags=cv2.cv.CV_HAAR_SCALE_IMAGE ) for (x, y,", "Image = %d \" % len(faces)) if len(faces) > 0: detected_persons = []", "(x, y, w, h) in faces: if w*h>500: persons_cnt+=1 image_crop = frame[y:y+h,x:x+w] #self.message_label.setText(\"Processing..", "+ h), (0, 255, 0), 2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,255,0),1) else:", "= QImage(image, image.shape[1], image.shape[0], image.strides[0], QImage.Format_RGB888) self.face_image.setPixmap(QPixmap.fromImage(face_image)) except Exception as e: print \"Errors", "persons_cnt print \"Image File = \" + str(file) print \"faceId = \" +", "+ w, y + h), (0, 255, 0), 2) cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL", "-*- coding: utf-8 -*- import sys import os import cv2 from PyQt4.QtCore import", "= DynamicFrame(self.qt) self.qt.hbox4.addWidget(self.qt.Dynamicframe) 
# -*- coding: utf-8 -*-
import sys
import os
import cv2
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
import datetime
import string
import random
import shutil
from time import gmtime, strftime, sleep
import sqlite3
# import imageUpload.py for uploading captured images to cloudinary
import imageUpload as imup
# import MSFaceAPI.py for msface api calls
import MSFaceAPI as msface

large_text_size = 22
medium_text_size = 14
small_text_size = 10
base_path = os.path.dirname(os.path.realpath(__file__))
dataset_path = os.path.join(base_path, 'dataset')
unknown_user_path = os.path.join(base_path, 'unknowns')
tmp_path = os.path.join(base_path, 'tmp')
placeholder_image = os.path.join(base_path, 'placeholder_600x400.svg')
db_path = os.path.join(base_path, 'users.db')
cloudinary_dataset = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/dataset'
cloudinary_tmp = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/tmp'
current_userid = 0
current_userfname = ''
detection_interval = 10000  # ms between identification runs
capture_interval = 30       # ms between preview frames
camera_port = 0
font1 = QFont('Helvetica', small_text_size)
font2 = QFont('Helvetica', medium_text_size)
font3 = QFont('Helvetica', large_text_size)
conn = sqlite3.connect(db_path)
cursor = conn.cursor()
TABLE_NAME = "users"
cascPath = 'haarcascade_frontalface_default.xml'
faceCascade = cv2.CascadeClassifier(cascPath)


# function to generate a random id for an image file name
def id_generator(size=20, chars=string.ascii_lowercase + string.digits + string.ascii_uppercase):
    return ''.join(random.choice(chars) for _ in range(size))


def make_dir(path):
    try:
        os.makedirs(path)
    except OSError:
        if not os.path.isdir(path):
            raise


class DynamicFrame(QWidget):

    def __init__(self, parent, *args, **kwargs):
        super(DynamicFrame, self).__init__()
        self.initUI()
        self.counter = 0

    def initUI(self):
        self.capturing = True
        self.capture = cv2.VideoCapture(camera_port)
        self.capture_cnt = 0
        self.video_stream = QLabel()
        self.video_stream.setScaledContents(True)
        self.video_stream.setAlignment(Qt.AlignLeft)
        self.video_stream.setFixedSize(600, 450)
        self.video_stream_label = QLabel('Live Video Stream')
        self.video_stream_label.setAlignment(Qt.AlignCenter)
        self.video_stream_label.setFont(font2)
        self.face_image = QLabel()
        self.face_image.setScaledContents(True)
        self.face_image.setFixedSize(600, 450)
        self.face_image.setAlignment(Qt.AlignRight)
        self.face_image.setPixmap(QPixmap(placeholder_image))
        self.face_image_label = QLabel('Last Capture Results')
        self.face_image_label.setAlignment(Qt.AlignCenter)
        self.face_image_label.setFont(font2)
        self.vbox1 = QVBoxLayout()
        self.vbox1.addWidget(self.video_stream)
        self.vbox1.addWidget(self.video_stream_label)
        self.vbox2 = QVBoxLayout()
        self.vbox2.addWidget(self.face_image)
        self.vbox2.addWidget(self.face_image_label)
        self.hbox = QHBoxLayout()
        self.hbox.addLayout(self.vbox1)
        self.hbox.addLayout(self.vbox2)
        self.hbox.setAlignment(Qt.AlignCenter)
        self.hbox.setSpacing(20)
        self.hbox2 = QHBoxLayout()
        self.hbox2.setAlignment(Qt.AlignCenter)
        self.message_label = QLabel('message')
        self.message_label.setAlignment(Qt.AlignCenter)
        self.message_label.setFont(font2)
        self.hbox2.addWidget(self.message_label)
        self.hbox2.setContentsMargins(20, 20, 20, 20)
        self.hbox2.setSpacing(10)
        self.label1 = QLabel('Real-Time Face Recognition based Surveillance')
        self.label2 = QLabel('')
        self.label1.setAlignment(Qt.AlignCenter)
        self.label2.setAlignment(Qt.AlignCenter)
        self.label1.setFont(font3)
        self.label2.setFont(font3)
        self.fbox = QFormLayout()
        self.fbox.setAlignment(Qt.AlignCenter)
        self.fbox.setContentsMargins(20, 20, 20, 20)
        self.fbox.addRow(self.label1)
        self.fbox.addRow(self.label2)
        self.vbox = QVBoxLayout()
        self.vbox.addLayout(self.fbox)
        self.vbox.addLayout(self.hbox)
        self.vbox.addLayout(self.hbox2)
        self.vbox.setAlignment(Qt.AlignCenter)
        self.setLayout(self.vbox)
        self.update_check()

    def stop_capture(self):
        if self.capturing:
            self.capturing = False
            self.capture.release()
            self.video_timer.stop()
            cv2.destroyAllWindows()

    def update_check(self):
        self.video_timer = QTimer(self)
        self.video_timer.timeout.connect(self.display_video_stream)
        self.video_timer.start(capture_interval)

    def display_video_stream(self):
        # Show the live preview with green boxes around detected faces
        ret, frame = self.capture.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(80, 80),
            flags=cv2.cv.CV_HAAR_SCALE_IMAGE
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = cv2.flip(frame, 1)
        image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)
        self.video_stream.setPixmap(QPixmap.fromImage(image))
        self.message_label.setText('Next image capture in %d seconds'
                                   % int((detection_interval - self.counter * capture_interval) / 1000))
        if self.counter == int(detection_interval / capture_interval):
            self.message_label.setText('Face identification started ...')
            self.update_dynamic_frame()
            self.counter = 0
        else:
            self.counter = self.counter + 1

    def update_dynamic_frame(self):
        global current_userid
        global current_userfname
        detected_personid = ''
        welcome_names = ''
        ramp_frames = 10
        print "Face identification started .........."
        cv2.destroyAllWindows()
        try:
            # Discard a few ramp frames so the camera can adjust before capture
            for i in xrange(ramp_frames):
                s, im = self.capture.read()
            ret, frame = self.capture.read()
            #self.message_label.setText('Image Captured')
            self.capture_cnt += 1
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = faceCascade.detectMultiScale(
                gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(80, 80),
                flags=cv2.cv.CV_HAAR_SCALE_IMAGE
            )
            print "Total Faces in Image = %d " % len(faces)
            #self.message_label.setText("Total Faces in Image = %d " % len(faces))
            if len(faces) > 0:
                detected_persons = []
                persons = []
                persons_cnt = 0
                detected_persons_cnt = 0
                for (x, y, w, h) in faces:
                    if w * h > 500:
                        persons_cnt += 1
                        image_crop = frame[y:y + h, x:x + w]
                        #self.message_label.setText("Processing.. %d " % persons_cnt)
                        # Crop the face, upload it to cloudinary, then detect and
                        # identify it through the MS Face API wrapper
                        file_name = id_generator() + '.jpg'
                        file = os.path.join(tmp_path, file_name)
                        cloudinary_url = cloudinary_tmp + '/' + file_name
                        cv2.imwrite(file, image_crop)
                        imup.upload_image(file, file_name)
                        faceid = msface.face_detect(cloudinary_url)
                        print "Result for person %d " % persons_cnt
                        print "Image File = " + str(file)
                        print "faceId = " + str(faceid)
                        detected_personid = msface.face_identify(faceid)
                        if detected_personid:
                            print "detected_personid = " + str(detected_personid)
                            # Look up the matched person in the local users table
                            comm = "SELECT * FROM %s WHERE personid = '%s'" % (TABLE_NAME, detected_personid)
                            res = cursor.execute(comm)
                            res = cursor.fetchone()
                            if res:
                                userid = res[0]
                                uname = res[1]
                                fname = res[2]
                                lname = res[3]
                                print "Welcome %s !" % (fname + ' ' + lname)
                                detected_persons_cnt += 1
                                detected_persons.append(fname)
                                persons.append(fname)
                                # Mark attendance once per user per day
                                now = datetime.datetime.now()
                                comm = ("SELECT * FROM users_present WHERE userid = %d and date = '%s' "
                                        % (int(userid), now.strftime("%Y-%m-%d")))
                                #print comm
                                res2 = cursor.execute(comm)
                                res2 = cursor.fetchone()
                                if res2 is None:
                                    format_str = "INSERT INTO users_present (id, userid) VALUES (NULL,%d)" % (int(userid))
                                    #print format_str
                                    conn.execute(format_str)
                                    conn.commit()
                                    print "Attendance marked for user %s " % uname
                                else:
                                    print "Attendance already marked for user %s " % uname
                        else:
                            # No match: save the crop for later review
                            time_str = strftime("%Y-%m-%d_%H:%M:%S", gmtime())
                            print "Unknown person found"
                            cv2.imwrite(os.path.join(unknown_user_path, 'cam1_' + time_str + '.jpg'), image_crop)
                            persons.append('Unknown')
                if detected_persons_cnt > 1:
                    for i in range(detected_persons_cnt - 1):
                        welcome_names = welcome_names + detected_persons[i] + ', '
                    welcome_names = welcome_names[:-2]
                    welcome_names = welcome_names + ' & ' + detected_persons[detected_persons_cnt - 1]
                elif detected_persons_cnt > 0:
                    welcome_names = detected_persons[0]
                self.label2.setText('Hello ' + welcome_names)
            else:
                self.label2.setText('')
                print "No person in image"
            # Annotate the captured frame: known names in green, unknowns in red.
            # Skip the small detections that were filtered out above so the
            # persons[] index stays aligned with the boxes we actually processed.
            k = 0
            for (x, y, w, h) in faces:
                if w * h <= 500 or k >= len(persons):
                    continue
                if persons[k] != 'Unknown':
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    cv2.putText(frame, persons[k], (x, y - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0), 1)
                else:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                    cv2.putText(frame, persons[k], (x, y - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
                k = k + 1
            #image = cv2.flip(frame, 1)
            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            face_image = QImage(image, image.shape[1], image.shape[0], image.strides[0], QImage.Format_RGB888)
            self.face_image.setPixmap(QPixmap.fromImage(face_image))
        except Exception as e:
            print "Errors occurred!"
            print e


class FullscreenWindow:

    def __init__(self, parent, *args, **kwargs):
        self.qt = QWidget()
        self.qt.showFullScreen()
        self.qt.pal = QPalette()
        self.qt.pal.setColor(QPalette.Background, QColor(0, 0, 0))
        self.qt.pal.setColor(QPalette.Foreground, QColor(255, 255, 255))
        self.qt.setPalette(self.qt.pal)
        self.bg_color = 0
        self.qt.hbox4 = QHBoxLayout()
        self.qt.Dynamicframe = DynamicFrame(self.qt)
        self.qt.hbox4.addWidget(self.qt.Dynamicframe)
        self.qt.vbox = QVBoxLayout()
        self.qt.vbox.addLayout(self.qt.hbox4)
        self.qt.setLayout(self.qt.vbox)


if __name__ == '__main__':
    make_dir(tmp_path)
    make_dir(unknown_user_path)
    # Clear out any stale captures from the tmp directory
    for root, dirs, files in os.walk(tmp_path):
        for f in files:
            os.unlink(os.path.join(root, f))
        for d in dirs:
            shutil.rmtree(os.path.join(root, d))
    a = QApplication(sys.argv)
    w = FullscreenWindow(a)
    sys.exit(a.exec_())

# command to terminate the running program
# killall -9
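# ---------------------------------------------------------------------------
# The script above assumes users.db already exists with a `users` table
# (queried by personid and unpacked positionally as userid/uname/fname/lname)
# and a `users_present` attendance table whose `date` column must default to
# the current day, since the INSERT above supplies only the userid. The sketch
# below is a minimal guess at that schema; the column types and the DEFAULT
# expression are assumptions, not taken from the original project.
# ---------------------------------------------------------------------------
import sqlite3

def init_db(path='users.db'):
    conn = sqlite3.connect(path)
    # Column order matters: the surveillance loop unpacks SELECT * positionally
    conn.execute("""CREATE TABLE IF NOT EXISTS users (
                        userid   INTEGER PRIMARY KEY,
                        uname    TEXT,
                        fname    TEXT,
                        lname    TEXT,
                        personid TEXT)""")
    # `date` defaults to today so inserting (id, userid) alone is enough
    conn.execute("""CREATE TABLE IF NOT EXISTS users_present (
                        id     INTEGER PRIMARY KEY,
                        userid INTEGER,
                        date   TEXT DEFAULT (date('now', 'localtime')))""")
    conn.commit()
    conn.close()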
import re
import csv
import pandas as pd  # unused in this pass; see the pandas variant below

# Parse the bad line numbers out of the error log produced by the first
# (failed) load of the raw TSV.
f = open("data-errors.txt", 'r', encoding="utf8")
fc = f.read()
fcbRegex = re.compile(r"line(\s\d+)")
clear = re.findall(fcbRegex, fc)
for i in clear:
    print("lines", i)
# findall returns strings ("  123"); convert to ints so the membership
# test against the integer line counter below actually matches
arr = [int(i) for i in clear]
print("array is", arr)

count = 1
reader = csv.reader(open('amazon_reviews_us_Watches_v1_00.tsv', 'r', encoding="utf8"), delimiter="\t")
writer = csv.writer(open('amazon_reviews_us_Watches_v1_00_clean.tsv', 'w', encoding="utf8"), delimiter="\t")
for row in reader:
    if count in arr:
        print("skipping ", count)
    else:
        print("writing ", count)
        writer.writerow(row)
    count += 1
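# ---------------------------------------------------------------------------
# A more compact variant using the pandas import above. This is a sketch, not
# the original approach: it assumes the numbers in data-errors.txt are 1-based
# file line numbers (hence the -1 to get 0-based `skiprows` indices) and that
# disabling quote handling is appropriate for this tab-separated file.
# ---------------------------------------------------------------------------
def clean_with_pandas(errors_path="data-errors.txt",
                      src="amazon_reviews_us_Watches_v1_00.tsv",
                      dst="amazon_reviews_us_Watches_v1_00_clean.tsv"):
    with open(errors_path, encoding="utf8") as fh:
        bad = sorted({int(n) - 1 for n in re.findall(r"line\s(\d+)", fh.read())})
    # skiprows takes 0-based row indices; the bad rows are simply never parsed
    df = pd.read_csv(src, sep="\t", skiprows=bad, quoting=csv.QUOTE_NONE)
    df.to_csv(dst, sep="\t", index=False)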
# coding: utf-8
"""
Running a slave instance.
"""

import logging
import sys
import time
import traceback

import dill as pickle
import zmq

logger = logging.getLogger(__name__)


class ExceptionPicklingError(Exception):
    """Represent an error attempting to pickle the result of a task"""


class TaskSystemExit(Exception):
    """For when the task raised a SystemExit exception, trying to quit"""


def do_task(task_id, task_function):
    """Do a task, as specified in a pickle bundle.

    :param task_id:       The ID of the task being run
    :param task_function: The callable to execute
    :returns: Pickle data of the result, or an exception
    """
    try:
        logger.debug("Running task with ID {}".format(task_id))
        # Run whatever task we've been given
        result = task_function()
        logger.debug("Completed task")
        # An error pickling here counts as a job failure
        return b"YAY " + pickle.dumps((task_id, result))
    except KeyboardInterrupt:
        # This is interactive so we want to let it float up - we'll handle the
        # special case in the parent context
        raise
    except BaseException:
        logger.debug("Exception processing task")
        # Everything else: We want to pass back across the network
        (_, exc_value, exc_trace) = sys.exc_info()
        exc_trace = traceback.format_tb(exc_trace)
        # We don't want to propagate a SystemExit to the other side
        if isinstance(exc_value, SystemExit):
            logger.debug("Intercepted task calling sys.exit")
            exc_value = TaskSystemExit()
        # Be careful - we might not be able to pickle the exception?? Go to
        # lengths to make sure that we pass something sensible back
        try:
            pickle.dumps(exc_value)
        except pickle.PicklingError:
            exc_value = ExceptionPicklingError(
                "{}: {}".format(str(type(exc_value)), str(exc_value)))
        return b"ONO " + pickle.dumps((task_id, exc_trace, exc_value))


def _do_handshake(socket, worker_id):
    logger.debug("Sending hello")
    socket.send(b"HELO IAM " + worker_id.encode("utf-8"))
    logger.debug("Awaiting confirmation of hello received")
    assert socket.recv() == b"HAY"
    logger.debug("Got hello. Going into task loop")


def _handle_task(socket, data):
    """Handle a reply asking us to do a task.

    :arg byte data: The pickle-data to load
    :returns: Pickle data of the result, or an exception
    """
    try:
        (task_id, task_function) = pickle.loads(data)
        logger.debug("Got task %s (%d bytes)", task_id, len(data))
        return do_task(task_id, task_function)
    except KeyboardInterrupt as exc:
        # This is a special case; try to tell the master that we failed
        # to quit, then continue to raise the error.
        logger.info("Got interrupt while processing task")
        socket.send(b"ONO " + pickle.dumps((task_id, "", exc)))
        socket.recv()
        raise


def run_slave(server_url, worker_id, timeout=30):
    """Run a slave instance and connect it to a specific master URL.

    :param str server_url: The server string to use to connect
    :param str worker_id:  The worker ID to use when communicating
    :param timeout: The time (in seconds) to wait with no jobs before terminating
    """
    logger.debug("Running slave {} connect to {}".format(worker_id, server_url))
    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    logger.debug("Connecting")
    socket.connect(server_url)
    socket.RCVTIMEO = int(1000 * timeout)
    try:
        _do_handshake(socket, worker_id)
    except zmq.error.Again:
        logger.debug("Timed out waiting for handshake.")
        sys.exit(1)
    else:
        # If waiting for the whole timeout, then stop waiting
        last_job = time.time()
        while time.time() - last_job < timeout:
            logger.debug("Asking for a task")
            socket.send("IZ BORED {}".format(worker_id).encode("UTF-8"))
            reply = socket.recv()
            # We get a command returned
            assert reply.startswith(b"PLZ")
            if reply == b"PLZ WAIT":
                logger.debug("No tasks available. Trying again in a few seconds.")
                time.sleep(min(timeout / 2.0, 5))
            elif reply == b"PLZ GOWAY":
                logger.debug("Got quit signal. ending main loop.")
                break
            elif reply.startswith(b"PLZ DO"):
                try:
                    result = _handle_task(socket, reply[7:])
                except KeyboardInterrupt:
                    # Now, we know we want to quit - so send the message letting
                    # the master know. This is a little unclean, but it's only
                    # because we are here that we can guarantee that we weren't
                    # in the middle of a send/recv when the signal was sent
                    logger.debug("Sending quit message after keyboardinterrupt")
                    socket.send(b"IGIVEUP " + worker_id.encode("utf-8"))
                    socket.recv()
                    raise
                logger.debug("Sending result of %d bytes", len(result))
                socket.send(result)
                # Await the ok
                assert socket.recv() == b"THX"
                last_job = time.time()
        if time.time() - last_job >= timeout:
            logger.debug("Waited too long for new tasks. Quitting.")
            socket.send(b"IGIVEUP " + worker_id.encode("utf-8"))
            socket.recv()
    finally:
        logger.debug("Closing socket")
        socket.LINGER = 300
        socket.close()
        logger.debug("Closing context")
        context.term()
    logger.debug("Slave completed.")


# Messaging protocol:
#
# Sent             Received       Action
# ---------------  -------------  ----------------------------------
# HELO IAM {id}    HAY            Negotiation success
# IZ BORED {id}    PLZ GOWAY      Exit
#                  PLZ WAIT       Nothing to do; try again soon
#                  PLZ DO {task}  Hand off task to runner
# YAY {result}     THX            Task succeeded with result data
# ONO {result}     THX            Task failed - with exception data
# IGIVEUP {id}     BYE            Quitting
logger.info(\"Got interrupt while processing task\") socket.send(b\"ONO \" + pickle.dumps((task_id, \"\",", "except KeyboardInterrupt as exc: # This is a special case; try to tell", "_handle_task(socket, data): \"\"\"Handle a reply asking us to do a task\"\"\" try: (task_id,", "server_url)) context = zmq.Context() socket = context.socket(zmq.REQ) logger.debug(\"Connecting\") socket.connect(server_url) socket.RCVTIMEO = int(1000 *", "the network (_, exc_value, exc_trace) = sys.exc_info() exc_trace = traceback.format_tb(exc_trace) # We don't", "pickle.loads(data) logger.debug(\"Got task %s (%d bytes)\", task_id, len(data)) return do_task(task_id, task_function) except KeyboardInterrupt", "# PLZ WAIT Nothing to do; try again soon # PLZ DO {task}", "We get a command returned assert reply.startswith(b\"PLZ\") if reply == b\"PLZ WAIT\": logger.debug(\"No", "logger = logging.getLogger(__name__) class ExceptionPicklingError(Exception): \"\"\"Represent an error attempting to pickle the result", "exc_trace) = sys.exc_info() exc_trace = traceback.format_tb(exc_trace) # We don't want to propagate a", "worker_id.encode(\"utf-8\")) logger.debug(\"Awaiting confirmation of hello recieved\") assert socket.recv() == b\"HAY\" logger.debug(\"Got hello. Going", "This is a little unclean, but it's only # because we are here", "# YAY {result} THX Task succeeded with result data # ONO {result} THX", "%s (%d bytes)\", task_id, len(data)) return do_task(task_id, task_function) except KeyboardInterrupt as exc: #", "Negotiation success # IZ BORED {id} PLZ GOWAY Exit # PLZ WAIT Nothing", "\"\"\" try: logger.debug(\"Running task with ID {}\".format(task_id)) # Run whatever task we've been", "the ok assert socket.recv() == b\"THX\" last_job = time.time() if time.time() - last_job", "raise except BaseException: logger.debug(\"Exception processing task\") # Everything else: We want to pass", "5)) elif reply == b\"PLZ GOWAY\": logger.debug(\"Got quit signal. ending main loop.\") break", "when the task raised a SystemExit exception, trying to quit\"\"\" def do_task(task_id, task_function):", "of hello recieved\") assert socket.recv() == b\"HAY\" logger.debug(\"Got hello. 
Going into task loop\")", "with no jobs before terminating \"\"\" logger.debug(\"Running slave {} connect to {}\".format(worker_id, server_url))", "def _handle_task(socket, data): \"\"\"Handle a reply asking us to do a task\"\"\" try:", "ending main loop.\") break elif reply.startswith(b\"PLZ DO\"): try: result = _handle_task(socket, reply[7:]) except", "IAM {id} HAY Negotiation success # IZ BORED {id} PLZ GOWAY Exit #", "only # because we are here that we can guarantee that we weren't", "GOWAY Exit # PLZ WAIT Nothing to do; try again soon # PLZ", "THX Task succeeded with result data # ONO {result} THX Task failed -", "side if isinstance(exc_value, SystemExit): logger.debug(\"Intercepted task calling sys.exit\") exc_value = TaskSystemExit() # Be", "len(data)) return do_task(task_id, task_function) except KeyboardInterrupt as exc: # This is a special", "to lengths # to make sure that we pass something sensible back try:", "{}\".format( str(type(exc_value)), str(exc_value))) return b\"ONO \" + pickle.dumps((task_id, exc_trace, exc_value)) def _do_handshake(socket, worker_id):", "str(type(exc_value)), str(exc_value))) return b\"ONO \" + pickle.dumps((task_id, exc_trace, exc_value)) def _do_handshake(socket, worker_id): logger.debug(\"Sending", "logger.debug(\"Sending hello\") socket.send(b\"HELO IAM \" + worker_id.encode(\"utf-8\")) logger.debug(\"Awaiting confirmation of hello recieved\") assert", "{id} PLZ GOWAY Exit # PLZ WAIT Nothing to do; try again soon", "socket.send(b\"HELO IAM \" + worker_id.encode(\"utf-8\")) logger.debug(\"Awaiting confirmation of hello recieved\") assert socket.recv() ==", "to raise the error. logger.info(\"Got interrupt while processing task\") socket.send(b\"ONO \" + pickle.dumps((task_id,", "{} connect to {}\".format(worker_id, server_url)) context = zmq.Context() socket = context.socket(zmq.REQ) logger.debug(\"Connecting\") socket.connect(server_url)", "+ worker_id.encode(\"utf-8\")) socket.recv() raise logger.debug(\"Sending result of %d bytes\", len(result)) socket.send(result) # Await", "slave instance. \"\"\" import logging import sys import time import traceback import dill", "quit message after keyboardinterrupt\") socket.send(b\"IGIVEUP \" + worker_id.encode(\"utf-8\")) socket.recv() raise logger.debug(\"Sending result of", "finally: logger.debug(\"Closing socket\") socket.LINGER = 300 socket.close() logger.debug(\"Closing context\") context.term() logger.debug(\"Slave completed.\") #", "str worker_if: The worker ID to use when communicating :param timeout: The time", "_do_handshake(socket, worker_id) except zmq.error.Again: logger.debug(\"Timed out waiting for handshake.\") sys.exit(1) else: # If", "the other side if isinstance(exc_value, SystemExit): logger.debug(\"Intercepted task calling sys.exit\") exc_value = TaskSystemExit()", "Task failed - with exception data # IGIVEUP {id} BYE Quitting; given up", "to quit\"\"\" def do_task(task_id, task_function): \"\"\"Do a task, as specified in a pickle", "timeout: logger.debug(\"Waited too long for new tasks. Quitting.\") socket.send(b\"IGIVEUP \" + worker_id.encode(\"utf-8\")) socket.recv()", "raised a SystemExit exception, trying to quit\"\"\" def do_task(task_id, task_function): \"\"\"Do a task,", "timeout: logger.debug(\"Asking for a task\") socket.send(\"IZ BORED {}\".format(worker_id).encode(\"UTF-8\")) reply = socket.recv() # We", "- with exception data # IGIVEUP {id} BYE Quitting; given up with processing", "# the master know. 
This is a little unclean, but it's only #", "logger.debug(\"Running task with ID {}\".format(task_id)) # Run whatever task we've been given result", "little unclean, but it's only # because we are here that we can", "This is interactive so we want to let it float up - we'll", "Go to lengths # to make sure that we pass something sensible back", "zmq.error.Again: logger.debug(\"Timed out waiting for handshake.\") sys.exit(1) else: # If waiting for the", "- so send the message letting # the master know. This is a", "pickle bundle. :arg byte data: The pickle-data to load :returns: Pickle data of", "to let it float up - we'll handle the # special case in", "case; try to tell the master that we failed # to quit, then", "# to make sure that we pass something sensible back try: pickle.dumps(exc_value) except", "WAIT\": logger.debug(\"No tasks available. Trying again in a few seconds.\") time.sleep(min(timeout / 2.0,", "trying to quit\"\"\" def do_task(task_id, task_function): \"\"\"Do a task, as specified in a", "SystemExit): logger.debug(\"Intercepted task calling sys.exit\") exc_value = TaskSystemExit() # Be careful - we", "- we'll handle the # special case in the parent context raise except", ":arg byte data: The pickle-data to load :returns: Pickle data of the result,", "server string to use to connect :param str worker_if: The worker ID to", "task, as specified in a pickle bundle. :arg byte data: The pickle-data to", "traceback import dill as pickle import zmq logger = logging.getLogger(__name__) class ExceptionPicklingError(Exception): \"\"\"Represent", "that we failed # to quit, then continue to raise the error. logger.info(\"Got", "exc: # This is a special case; try to tell the master that", "---------------------------------- # HELO IAM {id} HAY Negotiation success # IZ BORED {id} PLZ", "pickle.dumps(exc_value) except pickle.PicklingError: exc_value = ExceptionPicklingError(\"{}: {}\".format( str(type(exc_value)), str(exc_value))) return b\"ONO \" +", "try to tell the master that we failed # to quit, then continue", "elif reply == b\"PLZ GOWAY\": logger.debug(\"Got quit signal. ending main loop.\") break elif", "we've been given result = task_function() logger.debug(\"Completed task\") # An error pickling here", "ID to use when communicating :param timeout: The time (in seconds) to wait", "pickle import zmq logger = logging.getLogger(__name__) class ExceptionPicklingError(Exception): \"\"\"Represent an error attempting to", "SystemExit to the other side if isinstance(exc_value, SystemExit): logger.debug(\"Intercepted task calling sys.exit\") exc_value", "to do; try again soon # PLZ DO {task} Hand off task to", ":param str server_url: The server string to use to connect :param str worker_if:", "worker ID to use when communicating :param timeout: The time (in seconds) to", "Task succeeded with result data # ONO {result} THX Task failed - with", "to use when communicating :param timeout: The time (in seconds) to wait with", "traceback.format_tb(exc_trace) # We don't want to propagate a SystemExit to the other side", "logger.debug(\"Awaiting confirmation of hello recieved\") assert socket.recv() == b\"HAY\" logger.debug(\"Got hello. 
Going into", "load :returns: Pickle data of the result, or an exception \"\"\" try: logger.debug(\"Running", "been given result = task_function() logger.debug(\"Completed task\") # An error pickling here counts", "a task\"\"\" try: (task_id, task_function) = pickle.loads(data) logger.debug(\"Got task %s (%d bytes)\", task_id,", "Sent Recieved Action # ----------------------- ------------- ---------------------------------- # HELO IAM {id} HAY Negotiation", "= ExceptionPicklingError(\"{}: {}\".format( str(type(exc_value)), str(exc_value))) return b\"ONO \" + pickle.dumps((task_id, exc_trace, exc_value)) def", "= zmq.Context() socket = context.socket(zmq.REQ) logger.debug(\"Connecting\") socket.connect(server_url) socket.RCVTIMEO = int(1000 * timeout) try:", "task_function() logger.debug(\"Completed task\") # An error pickling here counts as a job failure", "# Now, we know we want to quit - so send the message", "interactive so we want to let it float up - we'll handle the", "* timeout) try: _do_handshake(socket, worker_id) except zmq.error.Again: logger.debug(\"Timed out waiting for handshake.\") sys.exit(1)", "a task\") socket.send(\"IZ BORED {}\".format(worker_id).encode(\"UTF-8\")) reply = socket.recv() # We get a command", "to runner # YAY {result} THX Task succeeded with result data # ONO", "Be careful - we might not be able to pickle the exception?? Go", "the master know. This is a little unclean, but it's only # because", "import time import traceback import dill as pickle import zmq logger = logging.getLogger(__name__)", "byte data: The pickle-data to load :returns: Pickle data of the result, or", "logger.debug(\"Sending result of %d bytes\", len(result)) socket.send(result) # Await the ok assert socket.recv()", "float up - we'll handle the # special case in the parent context", ">= timeout: logger.debug(\"Waited too long for new tasks. Quitting.\") socket.send(b\"IGIVEUP \" + worker_id.encode(\"utf-8\"))", "socket.send(result) # Await the ok assert socket.recv() == b\"THX\" last_job = time.time() if", "----------------------- ------------- ---------------------------------- # HELO IAM {id} HAY Negotiation success # IZ BORED", "logger.debug(\"Sending quit message after keyboardinterrupt\") socket.send(b\"IGIVEUP \" + worker_id.encode(\"utf-8\")) socket.recv() raise logger.debug(\"Sending result", "TaskSystemExit() # Be careful - we might not be able to pickle the", "# ONO {result} THX Task failed - with exception data # IGIVEUP {id}", "the parent context raise except BaseException: logger.debug(\"Exception processing task\") # Everything else: We", "socket.send(\"IZ BORED {}\".format(worker_id).encode(\"UTF-8\")) reply = socket.recv() # We get a command returned assert", "{id} HAY Negotiation success # IZ BORED {id} PLZ GOWAY Exit # PLZ", "signal. 
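# ---------------------------------------------------------------------------
# Sketch (not part of the original module): how a task bundle round-trips
# through do_task. A master is assumed to dill-pickle a (task_id, callable)
# pair; do_task runs the callable and returns the framed b"YAY "/b"ONO "
# reply that run_slave sends back over the socket. Values are illustrative.
#
#   >>> bundle = pickle.dumps((42, lambda: 2 + 2))   # what the master sends
#   >>> task_id, task_function = pickle.loads(bundle)
#   >>> reply = do_task(task_id, task_function)
#   >>> reply[:4] == b"YAY "
#   True
#   >>> pickle.loads(reply[4:])
#   (42, 4)
# ---------------------------------------------------------------------------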
def run_slave(server_url, worker_id, timeout=30):
    """Run a slave instance and connect it to a specific master URL.

    :param str server_url: The server string to use to connect
    :param str worker_id: The worker ID to use when communicating
    :param timeout: The time (in seconds) to wait with no jobs before
                    terminating
    """
    logger.debug("Running slave {}; connecting to {}".format(worker_id,
                                                             server_url))
    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    logger.debug("Connecting")
    socket.connect(server_url)
    socket.RCVTIMEO = int(1000 * timeout)
    try:
        _do_handshake(socket, worker_id)
    except zmq.error.Again:
        logger.debug("Timed out waiting for handshake.")
        sys.exit(1)
    else:
        # Track when we last had work; if a whole timeout period passes
        # with no jobs, stop waiting
        last_job = time.time()
        while time.time() - last_job < timeout:
            logger.debug("Asking for a task")
            socket.send("IZ BORED {}".format(worker_id).encode("UTF-8"))
            reply = socket.recv()
            # We get a command returned
            assert reply.startswith(b"PLZ")
            if reply == b"PLZ WAIT":
                logger.debug("No tasks available. "
                             "Trying again in a few seconds.")
                time.sleep(min(timeout / 2.0, 5))
            elif reply == b"PLZ GOWAY":
                logger.debug("Got quit signal. Ending main loop.")
                break
            elif reply.startswith(b"PLZ DO"):
                try:
                    result = _handle_task(socket, reply[7:])
                except KeyboardInterrupt:
                    # Now we know we want to quit - so send the message
                    # letting the master know. This is a little unclean, but
                    # it's only because we are here that we can guarantee
                    # that we weren't in the middle of a send/recv when the
                    # signal was sent
                    logger.debug("Sending quit message after keyboardinterrupt")
                    socket.send(b"IGIVEUP " + worker_id.encode("utf-8"))
                    socket.recv()
                    raise
                logger.debug("Sending result of %d bytes", len(result))
                socket.send(result)
                # Await the ok
                assert socket.recv() == b"THX"
                last_job = time.time()
        if time.time() - last_job >= timeout:
            logger.debug("Waited too long for new tasks. Quitting.")
            socket.send(b"IGIVEUP " + worker_id.encode("utf-8"))
            socket.recv()
    finally:
        logger.debug("Closing socket")
        socket.LINGER = 300
        socket.close()
        logger.debug("Closing context")
        context.term()
    logger.debug("Slave completed.")


# Messaging protocol:
#   Sent                    Received      Action
#   ----------------------- ------------- ----------------------------------
#   HELO IAM {id}           HAY           Negotiation success
#   IZ BORED {id}           PLZ GOWAY     Exit
#                           PLZ WAIT      Nothing to do; try again soon
#                           PLZ DO {task} Hand off task to runner
#   YAY {result}            THX           Task succeeded with result data
#   ONO {result}            THX           Task failed - with exception data
#   IGIVEUP {id}            BYE           Quitting; given up with processing
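# ---------------------------------------------------------------------------
# Sketch (not part of the original module): a minimal master-side loop that
# speaks the protocol above. `serve_tasks` is a hypothetical name - the real
# master implementation is not shown in this file - and it simply hands out
# a fixed list of (task_id, callable) bundles and discards results.
# ---------------------------------------------------------------------------
def serve_tasks(bind_url, tasks, n_workers=1):
    """Serve each (task_id, callable) in ``tasks``, then dismiss workers."""
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind(bind_url)
    pending = list(tasks)
    dismissed = 0
    try:
        # A REP socket must strictly alternate recv/send; the slave's REQ
        # socket guarantees that ordering from the other side
        while dismissed < n_workers:
            message = socket.recv()
            if message.startswith(b"HELO IAM "):
                socket.send(b"HAY")
            elif message.startswith(b"IZ BORED "):
                if pending:
                    socket.send(b"PLZ DO " + pickle.dumps(pending.pop(0)))
                else:
                    socket.send(b"PLZ GOWAY")
                    dismissed += 1
            elif message.startswith(b"YAY ") or message.startswith(b"ONO "):
                # A real master would unpickle and record message[4:] here
                socket.send(b"THX")
            elif message.startswith(b"IGIVEUP "):
                socket.send(b"BYE")
                dismissed += 1
    finally:
        socket.close()
        context.term()

# Example pairing (URLs and ids are illustrative):
#   master process:  serve_tasks("tcp://*:5555", [(1, lambda: 2 + 2)])
#   slave process:   run_slave("tcp://localhost:5555", "worker-1")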
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#

import re
import calendar
import csv
import time
from StringIO import StringIO
from datetime import datetime, timedelta

from pkg_resources import parse_version

from genshi.filters import Transformer
from genshi.filters.transform import StreamBuffer

from trac import __version__ as TRAC_VERSION
from trac.core import *
from trac.ticket import Ticket
from trac.ticket.model import Milestone
from trac.util.datefmt import format_date, parse_date, user_time
from trac.util.html import html as tag
from trac.util.translation import _
from trac.web.api import IRequestHandler, ITemplateStreamFilter
from trac.web.chrome import (
    Chrome, ITemplateProvider, add_ctxtnav, add_link, add_stylesheet
)

from hours import TracHoursPlugin, _
from utils import get_all_dict, hours_format


class TracHoursRoadmapFilter(Component):

    implements(ITemplateStreamFilter)

    # ITemplateStreamFilter methods

    def filter_stream(self, req, method, filename, stream, data):
        """
        filter the stream for the roadmap (/roadmap) and milestones
        /milestone/<milestone>
        """
        if filename in ('roadmap.html', 'milestone_view.html') and \
                'TICKET_VIEW_HOURS' in req.perm:
            trac_hours = TracHoursPlugin(self.env)

            hours = {}

            milestones = data.get('milestones')
            this_milestone = None

            if milestones is None:
                # /milestone view: only one milestone
                milestones = [data['milestone']]
                this_milestone = milestones[0].name
                find_xpath = "//*[@class='milestone']//h1"
                xpath = "//*[@class='milestone']//div[@class='info']"
            else:
                # /roadmap view
                find_xpath = "//*[@class='milestone']//h2/a"
                xpath = "//*[@class='milestone']/div[1]"

            for milestone in milestones:
                hours[milestone.name] = dict(totalhours=0.,
                                             estimatedhours=0.,
                                             )

                tickets = [tid for tid, in self.env.db_query("""
                    SELECT id FROM ticket WHERE milestone=%s
                    """, (milestone.name,))]

                if tickets:
                    hours[milestone.name]['date'] = \
                        Ticket(self.env, tickets[0])['time']
                for ticket in tickets:
                    ticket = Ticket(self.env, ticket)

                    # estimated hours for the ticket
                    try:
                        estimated_hours = float(ticket['estimatedhours'])
                    except (ValueError, TypeError):
                        estimated_hours = 0.
                    hours[milestone.name]['estimatedhours'] += estimated_hours

                    # total hours for the ticket (seconds -> hours)
                    total_hours = trac_hours.get_total_hours(
                        ticket.id) / 3600.0
                    hours[milestone.name]['totalhours'] += total_hours

                    # update date for oldest ticket
                    if ticket['time'] < hours[milestone.name]['date']:
                        hours[milestone.name]['date'] = ticket['time']

            b = StreamBuffer()
            stream |= Transformer(find_xpath).copy(b).end().select(xpath) \
                .append(self.MilestoneMarkup(req, b, hours, req.href,
                                             this_milestone))

        return stream

    class MilestoneMarkup(object):
        """Iterator for Transformer markup injection"""

        def __init__(self, req, buffer, hours, href, this_milestone):
            self.req = req
            self.buffer = buffer
            self.hours = hours
            self.href = href
            self.this_milestone = this_milestone

        def __iter__(self):
            if self.this_milestone is not None:  # for /milestone/xxx
                milestone = self.this_milestone
            else:
                milestone = self.buffer.events[3][1]
            if milestone not in self.hours.keys():
                return iter([])
            hours = self.hours[milestone]
            estimated_hours = hours['estimatedhours']
            total_hours = hours['totalhours']
            if not (estimated_hours or total_hours):
                return iter([])
            items = []
            if estimated_hours:
                if parse_version(TRAC_VERSION) < parse_version('1.0'):
                    items.append(tag.dt(_("Estimated Hours:")))
                    items.append(tag.dd(str(estimated_hours)))
                else:
                    items.append(tag.span(_("Estimated Hours: "),
                                          str(estimated_hours),
                                          class_="first interval"))
            date = hours['date']
            link = self.href("hours", milestone=milestone,
                             from_date=user_time(self.req, format_date, date))
            if parse_version(TRAC_VERSION) < parse_version('1.0'):
                items.append(tag.dt(tag.a(_("Total Hours:"), href=link)))
                items.append(
                    tag.dd(tag.a(hours_format % total_hours, href=link)))
                return iter(tag.dl(*items))
            else:
                items.append(tag.span(tag.a(_("Total Hours: "),
                                            hours_format % total_hours,
                                            href=link),
                                      class_='interval'))
                return iter(tag.p(*items, class_='legend'))


class TracUserHours(Component):

    implements(ITemplateProvider, IRequestHandler)

    # ITemplateProvider methods

    def get_htdocs_dirs(self):
        return []

    def get_templates_dirs(self):
        from pkg_resources import resource_filename
        return [resource_filename(__name__, 'templates')]

    # IRequestHandler methods

    def match_request(self, req):
        return req.path_info == '/hours/user' or \
               re.match(r'/hours/user/(?:tickets|dates)/(?:\w+)',
                        req.path_info) is not None

    def process_request(self, req):
        req.perm.require('TICKET_VIEW_HOURS')
        if req.path_info.rstrip('/') == '/hours/user':
            return self.users(req)
        m = re.match(r'/hours/user/(?P<field>\w+)/(?P<user>\w+)',
                     req.path_info)
        field = m.group('field')
        user = m.group('user')
        if field == 'tickets':
            return self.user_by_ticket(req, user)
        elif field == 'dates':
            return self.user_by_date(req, user)

    # Internal methods

    def date_data(self, req, data):
        """data for the date"""
        now = datetime.now()
        data['days'] = range(1, 32)
        data['months'] = list(enumerate(calendar.month_name))
        data['years'] = range(now.year, now.year - 10, -1)
        if 'from_date' in req.args:
            from_date_raw = user_time(req, parse_date, req.args['from_date'])
        else:
            from_date_raw = datetime(now.year, now.month, now.day)
            from_date_raw = from_date_raw - timedelta(days=7)
        if 'to_date' in req.args:
            to_date_raw = user_time(req, parse_date, req.args['to_date'])
            to_date_raw = to_date_raw + timedelta(hours=23, minutes=59,
                                                  seconds=59)
        else:
            to_date_raw = now
        data['from_date_raw'] = from_date_raw
        data['from_date'] = user_time(req, format_date, from_date_raw)
        data['to_date_raw'] = to_date_raw
        data['to_date'] = user_time(req, format_date, to_date_raw)

        data['prev_week'] = from_date_raw - timedelta(days=7)
        args = dict(req.args)
        args['from_date'] = user_time(req, format_date, data['prev_week'])
        args['to_date'] = user_time(req, format_date, from_date_raw)
        data['prev_url'] = req.href('/hours/user', **args)

    def users(self, req):
        """hours for all users"""
        data = {'hours_format': hours_format}

        # date data
        self.date_data(req, data)

        # milestone data
        milestone = req.args.get('milestone')
        milestones = Milestone.select(self.env)
        data['milestones'] = milestones

        # get the hours
        # trachours = TracHoursPlugin(self.env)
        # tickets = trachours.tickets_with_hours()
        hours = get_all_dict(self.env, """
            SELECT * FROM ticket_time
            WHERE time_started >= %s AND time_started < %s
            """, *[int(time.mktime(data[i].timetuple()))
                   for i in ('from_date_raw', 'to_date_raw')])
        details = req.args.get('details')

        worker_hours = {}
        if details != 'date':
            for entry in hours:
                worker = entry['worker']
                if worker not in worker_hours:
                    worker_hours[worker] = 0

                if milestone and milestone != \
                        Ticket(self.env,
                               entry['ticket']).values.get('milestone'):
                    continue

                worker_hours[worker] += entry['seconds_worked']

            worker_hours = [(worker, seconds / 3600.)
                            for worker, seconds in
                            sorted(worker_hours.items())]
        else:
            # hours for each worker, by date
            for entry in hours:
                date = user_time(req, format_date, entry['time_started'])
                worker = entry['worker']
                key = (date, worker)
                if key not in worker_hours:
                    worker_hours[key] = 0

                if milestone and milestone != \
                        Ticket(self.env,
                               entry['ticket']).values.get('milestone'):
                    continue

                worker_hours[key] += entry['seconds_worked']

            worker_hours = [(key[0], key[1], seconds / 3600.)
                            for key, seconds in sorted(worker_hours.items())]

        data['details'] = details
        data['worker_hours'] = worker_hours
        data['total_hours'] = sum(hours[-1] for hours in worker_hours)

        if req.args.get('format') == 'csv':
            req.send(self.export_csv(req, data))

        add_stylesheet(req, 'common/css/report.css')
        if details == 'date':
            add_ctxtnav(req, _('Hours summary'),
                        req.href.hours('user', from_date=data['from_date'],
                                       to_date=data['to_date']))
        else:
            add_ctxtnav(req, _('Hours by date'),
                        req.href.hours('user', details='date',
                                       from_date=data['from_date'],
                                       to_date=data['to_date']))
        add_link(req, 'alternate', req.href(req.path_info, format='csv'),
                 'CSV', 'text/csv', 'csv')
        # add_link(req, 'prev', self.get_href(query, args, context.href),
        #          _('Prev Week'))
        # add_link(req, 'next', self.get_href(query, args, context.href),
        #          _('Next Week'))
        # prevnext_nav(req, _('Prev Week'), _('Next Week'))
        Chrome(self.env).add_jquery_ui(req)
        return 'hours_users.html', data, 'text/html'

    def user_by_ticket(self, req, user):
        """hours page for a single user"""
        data = {'hours_format': hours_format,
                'worker': user}
        self.date_data(req, data)
        args = [user]
        args += [int(time.mktime(data[i].timetuple()))
                 for i in ('from_date_raw', 'to_date_raw')]
        hours = get_all_dict(self.env, """
            SELECT * FROM ticket_time
            WHERE worker=%s AND time_started >= %s AND time_started < %s
            """, *args)
        worker_hours = {}
        for entry in hours:
            ticket = entry['ticket']
            if ticket not in worker_hours:
                worker_hours[ticket] = 0
            worker_hours[ticket] += entry['seconds_worked']

        data['tickets'] = dict([(i, Ticket(self.env, i))
                                for i in worker_hours.keys()])
        # sort by ticket number and convert to hours
        worker_hours = [(ticket_id, seconds / 3600.)
                        for ticket_id, seconds in
                        sorted(worker_hours.items())]
        data['worker_hours'] = worker_hours
        data['total_hours'] = sum(hours[1] for hours in worker_hours)

        if req.args.get('format') == 'csv':
            buffer = StringIO()
            writer = csv.writer(buffer)
            title = _("Hours for {user}").format(user=user)
            writer.writerow([title, req.abs_href()])
            writer.writerow([])
            writer.writerow(['From', 'To'])
            writer.writerow([data['from_date'], data['to_date']])
            writer.writerow([])
            writer.writerow(['Ticket', 'Hours'])
            for ticket, hours in worker_hours:
                writer.writerow([ticket, hours])
            req.send(buffer.getvalue(), 'text/csv')

        add_stylesheet(req, 'common/css/report.css')
        add_ctxtnav(req, _('Hours by Query'),
                    req.href.hours(from_date=data['from_date'],
                                   to_date=data['to_date']))
        add_ctxtnav(req, _('Hours by User'),
                    req.href.hours('user', from_date=data['from_date'],
                                   to_date=data['to_date']))
        add_ctxtnav(req, _('Hours by date'),
                    req.href.hours('user/dates/{}'.format(user),
                                   from_date=data['from_date'],
                                   to_date=data['to_date']))
        add_link(req, 'alternate', req.href(req.path_info, format='csv'),
                 'CSV', 'text/csv', 'csv')
        Chrome(self.env).add_jquery_ui(req)
        return 'hours_user_by_ticket.html', data, 'text/html'

    def user_by_date(self, req, user):
        """hours page for a single user, by date"""
        data = {'hours_format': hours_format,
                'worker': user}
        self.date_data(req, data)
        args = [user]
        args += [int(time.mktime(data[i].timetuple()))
                 for i in ('from_date_raw', 'to_date_raw')]
        hours = get_all_dict(self.env, """
            SELECT * FROM ticket_time
            WHERE worker=%s AND time_started >= %s AND time_started < %s
            """, *args)
        worker_hours = {}
        for entry in hours:
            date = user_time(req, format_date, entry['time_started'])
            ticket = entry['ticket']
            if date not in worker_hours:
                worker_hours[date] = {
                    'seconds': 0,
                    'tickets': [],
                }
            worker_hours[date]['seconds'] += entry['seconds_worked']
            if ticket not in worker_hours[date]['tickets']:
                worker_hours[date]['tickets'].append(ticket)

        data['tickets'] = dict([(entry['ticket'],
                                 Ticket(self.env, entry['ticket']))
                                for entry in hours])
        # sort by date and convert to hours
        worker_hours = [(date, details['tickets'],
                         details['seconds'] / 3600.)
                        for date, details in sorted(worker_hours.items())]
        data['worker_hours'] = worker_hours
        data['total_hours'] = sum(hours[2] for hours in worker_hours)

        if req.args.get('format') == 'csv':
            buffer = StringIO()
            writer = csv.writer(buffer)
            title = _("Hours for {user}").format(user=user)
            writer.writerow([title, req.abs_href()])
            writer.writerow([])
            writer.writerow(['From', 'To'])
            writer.writerow([data['from_date'], data['to_date']])
            writer.writerow([])
            writer.writerow(['Date', 'Tickets', 'Hours'])
            for date, tickets, hours in worker_hours:
                ids = ['#{}'.format(id) for id in tickets]
                writer.writerow([date, ','.join(ids), hours])
            req.send(buffer.getvalue(), 'text/csv')

        add_stylesheet(req, 'common/css/report.css')
        add_ctxtnav(req, _('Hours by Query'),
                    req.href.hours(from_date=data['from_date'],
                                   to_date=data['to_date']))
        add_ctxtnav(req, _('Hours by User'),
                    req.href.hours('user', from_date=data['from_date'],
                                   to_date=data['to_date']))
        add_ctxtnav(req, _('Hours by ticket'),
                    req.href.hours('user/tickets/{}'.format(user),
                                   from_date=data['from_date'],
                                   to_date=data['to_date']))
        add_link(req, 'alternate',
                 req.href(req.path_info, format='csv',
                          from_date=data['from_date'],
                          to_date=data['to_date']),
                 'CSV', 'text/csv', 'csv')
        Chrome(self.env).add_jquery_ui(req)
        return 'hours_user_by_date.html', data, 'text/html'

    def export_csv(self, req, data, sep=',', mimetype='text/csv'):
        content = StringIO()
        content.write('\xef\xbb\xbf')  # BOM
        writer = csv.writer(content, delimiter=sep,
                            quoting=csv.QUOTE_MINIMAL)
        title = _("Hours for {project}").format(project=self.env.project_name)
        writer.writerow([title, req.abs_href()])
        writer.writerow([])
        writer.writerow(['From', 'To'])
        writer.writerow([data['from_date'], data['to_date']])
        writer.writerow([])
        writer.writerow(['Worker', 'Hours'])
        for worker, hours in data['worker_hours']:
            writer.writerow([worker, hours])
        return content.getvalue(), '%s;charset=utf-8' % mimetype
for key, seconds in sorted(worker_hours.items())] data['details'] = details data['worker_hours'] = worker_hours", "worker_hours[ticket] += entry['seconds_worked'] data['tickets'] = dict([(i, Ticket(self.env, i)) for i in worker_hours.keys()]) #", "%s AND time_started < %s \"\"\", *[int(time.mktime(data[i].timetuple())) for i in ('from_date_raw', 'to_date_raw')]) details", "to_date_raw = user_time(req, parse_date, req.args['to_date']) to_date_raw = to_date_raw + timedelta(hours=23, minutes=59, seconds=59) else:", "csv.writer(content, delimiter=sep, quoting=csv.QUOTE_MINIMAL) title = _(\"Hours for {project}\").format(project=self.env.project_name) writer.writerow([title, req.abs_href()]) writer.writerow([]) writer.writerow(['From', 'To'])", "time_started < %s \"\"\", *[int(time.mktime(data[i].timetuple())) for i in ('from_date_raw', 'to_date_raw')]) details = req.args.get('details')", "Ticket(self.env, tickets[0])['time'] for ticket in tickets: ticket = Ticket(self.env, ticket) # estimated hours", "to_date=data['to_date'])) add_ctxtnav(req, _('Hours by User'), req.href.hours('user', from_date=data['from_date'], to_date=data['to_date'])) add_ctxtnav(req, _('Hours by ticket'), req.href.hours('user/tickets/{}'.format(user),", "get the hours # trachours = TracHoursPlugin(self.env) # tickets = trachours.tickets_with_hours() hours =", "_(\"Hours for {project}\").format(project=self.env.project_name) writer.writerow([title, req.abs_href()]) writer.writerow([]) writer.writerow(['From', 'To']) writer.writerow([data['from_date'], data['to_date']]) if data['milestone']: writer.writerow(['Milestone',", "= trac_hours.get_total_hours( ticket.id) / 3600.0 hours[milestone.name]['totalhours'] += total_hours # update date for oldest", "= hours['date'] link = self.href(\"hours\", milestone=milestone, from_date=user_time(self.req, format_date, date)) if parse_version(TRAC_VERSION) < parse_version('1.0'):", "data['to_date']]) if data['milestone']: writer.writerow(['Milestone', data['milestone']]) writer.writerow([]) writer.writerow(['Worker', 'Hours']) for worker, hours in data['worker_hours']:", "import get_all_dict from utils import hours_format class TracHoursRoadmapFilter(Component): implements(ITemplateStreamFilter) # ITemplateStreamFilter methods def", "self.hours.keys(): return iter([]) hours = self.hours[milestone] estimated_hours = hours['estimatedhours'] total_hours = hours['totalhours'] if", "for {user}\").format(user=user) writer.writerow([title, req.abs_href()]) writer.writerow([]) writer.writerow(['From', 'To']) writer.writerow([data['from_date'], data['to_date']]) writer.writerow([]) writer.writerow(['Ticket', 'Hours']) for", "writer.writerow([title, req.abs_href()]) writer.writerow([]) writer.writerow(['From', 'To']) writer.writerow([data['from_date'], data['to_date']]) writer.writerow([]) writer.writerow(['Ticket', 'Hours']) for date, tickets,", "by ticket'), req.href.hours('user/tickets/{}'.format(user), from_date=data['from_date'], to_date=data['to_date'])) add_link(req, 'alternate', req.href(req.path_info, format='csv', from_date=data['from_date'], to_date=data['to_date']), 'CSV', 'text/csv',", "import calendar import csv import time from StringIO import StringIO from datetime import", "Transformer markup injection\"\"\" def __init__(self, req, buffer, hours, href, this_milestone): self.req = req", "to_date=data['to_date'])) add_link(req, 'alternate', req.href(req.path_info, format='csv'), 'CSV', 'text/csv', 'csv') # add_link(req, 'prev', self.get_href(query, args,", "(ValueError, TypeError): 
estimated_hours = 0. hours[milestone.name]['estimatedhours'] += estimated_hours # total hours for the", "= entry['ticket'] if date not in worker_hours: worker_hours[date] = { 'seconds': 0, 'tickets':", "licensed as described in the file COPYING, which # you should have received", "not in self.hours.keys(): return iter([]) hours = self.hours[milestone] estimated_hours = hours['estimatedhours'] total_hours =", "= Ticket(self.env, ticket) # estimated hours for the ticket try: estimated_hours = float(ticket['estimatedhours'])", "to_date_raw) data['prev_week'] = from_date_raw - timedelta(days=7) args = dict(req.args) args['from_date'] = user_time(req, format_date,", "calendar import csv import time from StringIO import StringIO from datetime import datetime,", "__init__(self, req, buffer, hours, href, this_milestone): self.req = req self.buffer = buffer self.hours", "- timedelta(days=7) if 'to_date' in req.args: to_date_raw = user_time(req, parse_date, req.args['to_date']) to_date_raw =", "user_time(req, format_date, to_date_raw) data['prev_week'] = from_date_raw - timedelta(days=7) args = dict(req.args) args['from_date'] =", "= entry['worker'] key = (date, worker) if key not in worker_hours: worker_hours[key] =", "data, sep=',', mimetype='text/csv'): content = StringIO() content.write('\\xef\\xbb\\xbf') # BOM writer = csv.writer(content, delimiter=sep,", "date not in worker_hours: worker_hours[date] = { 'seconds': 0, 'tickets': [], } worker_hours[date]['seconds']", "[int(time.mktime(data[i].timetuple())) for i in ('from_date_raw', 'to_date_raw')] hours = get_all_dict(self.env, \"\"\" SELECT * FROM", "for /milestone/xxx milestone = self.this_milestone else: milestone = self.buffer.events[3][1] if milestone not in", "users\"\"\" data = {'hours_format': hours_format} # date data self.date_data(req, data) # milestone data", "data['milestone']: writer.writerow(['Milestone', data['milestone']]) writer.writerow([]) writer.writerow(['Worker', 'Hours']) for worker, hours in data['worker_hours']: writer.writerow([worker, hours])", "[(worker, seconds / 3600.) for worker, seconds in sorted(worker_hours.items())] else: for entry in", "= [data['milestone']] this_milestone = milestones[0].name find_xpath = \"//div[@class='milestone']/h1\" xpath = \"//div[@class='milestone']/div[1]\" else: #", "= [(ticket_id, seconds / 3600.) 
for ticket_id, seconds in sorted(worker_hours.items())] data['worker_hours'] = worker_hours", "_('Hours by date'), req.href.hours('user', details='date', from_date=data['from_date'], to_date=data['to_date'])) add_link(req, 'alternate', req.href(req.path_info, format='csv'), 'CSV', 'text/csv',", "milestones = [data['milestone']] this_milestone = milestones[0].name find_xpath = \"//div[@class='milestone']/h1\" xpath = \"//div[@class='milestone']/div[1]\" else:", "[data['milestone']] this_milestone = milestones[0].name find_xpath = \"//div[@class='milestone']/h1\" xpath = \"//div[@class='milestone']/div[1]\" else: # /roadmap", "sorted(worker_hours.items())] else: for entry in hours: date = user_time(req, format_date, entry['time_started']) worker =", "data['from_date_raw'] = from_date_raw data['from_date'] = user_time(req, format_date, from_date_raw) data['to_date_raw'] = to_date_raw data['to_date'] =", "from genshi.filters.transform import StreamBuffer from trac import __version__ as TRAC_VERSION from trac.core import", "def __iter__(self): if self.this_milestone is not None: # for /milestone/xxx milestone = self.this_milestone", ": only one milestone milestones = [data['milestone']] this_milestone = milestones[0].name find_xpath = \"//div[@class='milestone']/h1\"", "= get_all_dict(self.env, \"\"\" SELECT * FROM ticket_time WHERE time_started >= %s AND time_started", "= user_time(req, format_date, from_date_raw) data['prev_url'] = req.href('/hours/user', **args) def users(self, req): \"\"\"hours for", "by date'), req.href.hours('user', details='date', from_date=data['from_date'], to_date=data['to_date'])) add_link(req, 'alternate', req.href(req.path_info, format='csv'), 'CSV', 'text/csv', 'csv')", "TracHoursRoadmapFilter(Component): implements(ITemplateStreamFilter) # ITemplateStreamFilter methods def filter_stream(self, req, method, filename, stream, data): \"\"\"", "ticket = entry['ticket'] if ticket not in worker_hours: worker_hours[ticket] = 0 worker_hours[ticket] +=", "if date not in worker_hours: worker_hours[date] = { 'seconds': 0, 'tickets': [], }", "0 worker_hours[ticket] += entry['seconds_worked'] data['tickets'] = dict([(i, Ticket(self.env, i)) for i in worker_hours.keys()])", "milestones: hours[milestone.name] = dict(totalhours=0., estimatedhours=0., ) tickets = [tid for tid, in self.env.db_query(\"\"\"", "= [(key[0], key[1], seconds / 3600.) for key, seconds in sorted(worker_hours.items())] data['details'] =", "writer.writerow([data['from_date'], data['to_date']]) if data['milestone']: writer.writerow(['Milestone', data['milestone']]) writer.writerow([]) writer.writerow(['Worker', 'Hours']) for worker, hours in", "this_milestone = milestones[0].name find_xpath = \"//div[@class='milestone']/h1\" xpath = \"//div[@class='milestone']/div[1]\" else: # /roadmap view", "0. 
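
# The copy/append idiom in TracHoursRoadmapFilter.filter_stream() can be hard
# to follow out of context. The function below is an illustrative toy, not
# part of the plugin: it applies the same StreamBuffer copy/append pattern to
# a static snippet. The function name and the sample markup are invented for
# demonstration.
def _transformer_copy_append_demo():
    from genshi.input import HTML

    snippet = HTML(u'<div class="milestone"><h1>milestone1</h1>'
                   u'<div>info</div></div>')
    buf = StreamBuffer()
    # Copy the matched <h1> into the buffer, then replay the buffer inside
    # the milestone's first child <div>, mirroring how filter_stream()
    # injects the per-milestone hour totals.
    out = snippet | Transformer('//h1').copy(buf).end() \
        .select("//div[@class='milestone']/div[1]").append(buf)
    return out.render()
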

class TracUserHours(Component):

    implements(ITemplateProvider, IRequestHandler)

    # ITemplateProvider methods

    def get_htdocs_dirs(self):
        return []

    def get_templates_dirs(self):
        from pkg_resources import resource_filename
        return [resource_filename(__name__, 'templates')]

    # IRequestHandler methods

    def match_request(self, req):
        return req.path_info == '/hours/user' or \
               re.match(r'/hours/user/(?:tickets|dates)/(?:\w+)',
                        req.path_info) is not None

    def process_request(self, req):
        req.perm.require('TICKET_VIEW_HOURS')
        if req.path_info.rstrip('/') == '/hours/user':
            return self.users(req)
        m = re.match(r'/hours/user/(?P<field>\w+)/(?P<user>\w+)',
                     req.path_info)
        field = m.group('field')
        user = m.group('user')
        if field == 'tickets':
            return self.user_by_ticket(req, user)
        elif field == 'dates':
            return self.user_by_date(req, user)

    # Internal methods

    def date_data(self, req, data):
        """data for the date"""
        now = datetime.now()
        data['days'] = range(1, 32)
        data['months'] = list(enumerate(calendar.month_name))
        data['years'] = range(now.year, now.year - 10, -1)
        if 'from_date' in req.args:
            from_date_raw = user_time(req, parse_date,
                                      req.args['from_date'])
        else:
            from_date_raw = datetime(now.year, now.month, now.day)
            from_date_raw = from_date_raw - timedelta(days=7)
        if 'to_date' in req.args:
            to_date_raw = user_time(req, parse_date, req.args['to_date'])
            to_date_raw = to_date_raw + timedelta(hours=23, minutes=59,
                                                  seconds=59)
        else:
            to_date_raw = now
        data['from_date_raw'] = from_date_raw
        data['from_date'] = user_time(req, format_date, from_date_raw)
        data['to_date_raw'] = to_date_raw
        data['to_date'] = user_time(req, format_date, to_date_raw)
        data['prev_week'] = from_date_raw - timedelta(days=7)
        args = dict(req.args)
        args['from_date'] = user_time(req, format_date, data['prev_week'])
        args['to_date'] = user_time(req, format_date, from_date_raw)
        data['prev_url'] = req.href('/hours/user', **args)
    def users(self, req):
        """hours for all users"""

        data = {'hours_format': hours_format}

        # date data
        self.date_data(req, data)

        # milestone data
        milestone = req.args.get('milestone')
        milestones = Milestone.select(self.env)
        data['milestone'] = milestone
        data['milestones'] = milestones

        # get the hours
        # trachours = TracHoursPlugin(self.env)
        # tickets = trachours.tickets_with_hours()
        hours = get_all_dict(self.env, """
            SELECT * FROM ticket_time
            WHERE time_started >= %s AND time_started < %s
            """, *[int(time.mktime(data[i].timetuple()))
                   for i in ('from_date_raw', 'to_date_raw')])
        details = req.args.get('details')
        worker_hours = {}
        if details != 'date':
            for entry in hours:
                worker = entry['worker']
                if worker not in worker_hours:
                    worker_hours[worker] = 0

                if milestone and milestone != \
                        Ticket(self.env,
                               entry['ticket']).values.get('milestone'):
                    continue

                worker_hours[worker] += entry['seconds_worked']
            worker_hours = [(worker, seconds / 3600.)
                            for worker, seconds in
                            sorted(worker_hours.items())]
        else:
            for entry in hours:
                date = user_time(req, format_date, entry['time_started'])
                worker = entry['worker']
                key = (date, worker)
                if key not in worker_hours:
                    worker_hours[key] = 0
                if milestone and milestone != \
                        Ticket(self.env,
                               entry['ticket']).values.get('milestone'):
                    continue
                worker_hours[key] += entry['seconds_worked']
            worker_hours = [(key[0], key[1], seconds / 3600.)
                            for key, seconds in sorted(worker_hours.items())]
        data['details'] = details
        data['worker_hours'] = worker_hours
        data['total_hours'] = sum(hours[-1] for hours in worker_hours)

        if req.args.get('format') == 'csv':
            req.send(self.export_csv(req, data))

        add_stylesheet(req, 'common/css/report.css')
        if details == 'date':
            add_ctxtnav(req, _('Hours summary'),
                        req.href.hours('user', from_date=data['from_date'],
                                       to_date=data['to_date']))
        else:
            add_ctxtnav(req, _('Hours by date'),
                        req.href.hours('user', details='date',
                                       from_date=data['from_date'],
                                       to_date=data['to_date']))
        add_link(req, 'alternate', req.href(req.path_info, format='csv'),
                 'CSV', 'text/csv', 'csv')
        # add_link(req, 'prev', self.get_href(query, args, context.href),
        #          _('Prev Week'))
        # add_link(req, 'next', self.get_href(query, args, context.href),
        #          _('Next Week'))
        # prevnext_nav(req, _('Prev Week'), _('Next Week'))
        Chrome(self.env).add_jquery_ui(req)

        return 'hours_users.html', data, 'text/html'

    def user_by_ticket(self, req, user):
        """hours page for a single user"""

        data = {'hours_format': hours_format,
                'worker': user}
        self.date_data(req, data)
        args = [user]
        args += [int(time.mktime(data[i].timetuple()))
                 for i in ('from_date_raw', 'to_date_raw')]
        hours = get_all_dict(self.env, """
            SELECT * FROM ticket_time
            WHERE worker=%s AND time_started >= %s AND time_started < %s
            """, *args)
        worker_hours = {}
        for entry in hours:
            ticket = entry['ticket']
            if ticket not in worker_hours:
                worker_hours[ticket] = 0
            worker_hours[ticket] += entry['seconds_worked']

        data['tickets'] = dict([(i, Ticket(self.env, i))
                                for i in worker_hours.keys()])

        # sort by ticket number and convert to hours
        worker_hours = [(ticket_id, seconds / 3600.)
                        for ticket_id, seconds in
                        sorted(worker_hours.items())]
        data['worker_hours'] = worker_hours
        data['total_hours'] = sum(hours[1] for hours in worker_hours)

        if req.args.get('format') == 'csv':
            buffer = StringIO()
            writer = csv.writer(buffer)
            title = _("Hours for {user}").format(user=user)
            writer.writerow([title, req.abs_href()])
            writer.writerow([])
            writer.writerow(['From', 'To'])
            writer.writerow([data['from_date'], data['to_date']])
            writer.writerow([])
            writer.writerow(['Ticket', 'Hours'])
            for ticket, hours in worker_hours:
                writer.writerow([ticket, hours])
            req.send(buffer.getvalue(), 'text/csv')

        add_stylesheet(req, 'common/css/report.css')
        add_ctxtnav(req, _('Hours by Query'),
                    req.href.hours(from_date=data['from_date'],
                                   to_date=data['to_date']))
        add_ctxtnav(req, _('Hours by User'),
                    req.href.hours('user', from_date=data['from_date'],
                                   to_date=data['to_date']))
        add_ctxtnav(req, _('Hours by date'),
                    req.href.hours('user/dates/{}'.format(user),
                                   from_date=data['from_date'],
                                   to_date=data['to_date']))
        add_link(req, 'alternate', req.href(req.path_info, format='csv'),
                 'CSV', 'text/csv', 'csv')
        Chrome(self.env).add_jquery_ui(req)
        return 'hours_user_by_ticket.html', data, 'text/html'

    def user_by_date(self, req, user):
        """hours page for a single user"""

        data = {'hours_format': hours_format, 'worker': user}
        self.date_data(req, data)
        args = [user]
        args += [int(time.mktime(data[i].timetuple()))
                 for i in ('from_date_raw', 'to_date_raw')]
        hours = get_all_dict(self.env, """
            SELECT * FROM ticket_time
            WHERE worker=%s AND time_started >= %s AND time_started < %s
            """, *args)
        worker_hours = {}
        for entry in hours:
            date = user_time(req, format_date, entry['time_started'])
            ticket = entry['ticket']
            if date not in worker_hours:
                worker_hours[date] = {
                    'seconds': 0,
                    'tickets': [],
                }
            worker_hours[date]['seconds'] += entry['seconds_worked']
            if ticket not in worker_hours[date]['tickets']:
                worker_hours[date]['tickets'].append(ticket)

        data['tickets'] = dict([(entry['ticket'],
                                 Ticket(self.env, entry['ticket']))
                                for entry in hours])

        # sort by date and convert to hours
        worker_hours = [(date, details['tickets'], details['seconds'] / 3600.)
                        for date, details in sorted(worker_hours.items())]
        data['worker_hours'] = worker_hours
        data['total_hours'] = sum(hours[2] for hours in worker_hours)

        if req.args.get('format') == 'csv':
            buffer = StringIO()
            writer = csv.writer(buffer)
            title = _("Hours for {user}").format(user=user)
            writer.writerow([title, req.abs_href()])
            writer.writerow([])
            writer.writerow(['From', 'To'])
            writer.writerow([data['from_date'], data['to_date']])
            writer.writerow([])
            writer.writerow(['Ticket', 'Hours'])
            for date, tickets, hours in worker_hours:
                ids = ['#{}'.format(id) for id in tickets]
                writer.writerow([date, ','.join(ids), hours])
            req.send(buffer.getvalue(), 'text/csv')

        add_stylesheet(req, 'common/css/report.css')
        add_ctxtnav(req, _('Hours by Query'),
                    req.href.hours(from_date=data['from_date'],
                                   to_date=data['to_date']))
        add_ctxtnav(req, _('Hours by User'),
                    req.href.hours('user', from_date=data['from_date'],
                                   to_date=data['to_date']))
        add_ctxtnav(req, _('Hours by ticket'),
                    req.href.hours('user/tickets/{}'.format(user),
                                   from_date=data['from_date'],
                                   to_date=data['to_date']))
        add_link(req, 'alternate',
                 req.href(req.path_info, format='csv',
                          from_date=data['from_date'],
                          to_date=data['to_date']),
                 'CSV', 'text/csv', 'csv')
        Chrome(self.env).add_jquery_ui(req)
        return 'hours_user_by_date.html', data, 'text/html'

    def export_csv(self, req, data, sep=',', mimetype='text/csv'):
        content = StringIO()
        content.write('\xef\xbb\xbf')  # BOM
        writer = csv.writer(content, delimiter=sep,
                            quoting=csv.QUOTE_MINIMAL)
        title = _("Hours for {project}").format(project=self.env.project_name)
        writer.writerow([title, req.abs_href()])
        writer.writerow([])
        writer.writerow(['From', 'To'])
        writer.writerow([data['from_date'], data['to_date']])
        if data['milestone']:
            writer.writerow(['Milestone', data['milestone']])
        writer.writerow([])
        writer.writerow(['Worker', 'Hours'])
        for worker, hours in data['worker_hours']:
            writer.writerow([worker, hours])
        return content.getvalue(), mimetype
import resource_filename return [resource_filename(__name__, 'templates')] # IRequestHandler methods def match_request(self, req):", "req.args.get('format') == 'csv': buffer = StringIO() writer = csv.writer(buffer) title = _(\"Hours for", "import csv import time from StringIO import StringIO from datetime import datetime, timedelta", "= \"//div[@class='milestone']/div[1]\" else: # /roadmap view find_xpath = \"//*[@class='milestone']//h2/a\" xpath = \"//*[@class='milestone']/div[1]\" for", "by Query'), req.href.hours(from_date=data['from_date'], to_date=data['to_date'])) add_ctxtnav(req, _('Hours by User'), req.href.hours('user', from_date=data['from_date'], to_date=data['to_date'])) add_ctxtnav(req, _('Hours", "= 0. hours[milestone.name]['estimatedhours'] += estimated_hours # total hours for the ticket (seconds ->", "add_link, add_stylesheet ) from hours import TracHoursPlugin, _ from sqlhelper import get_all_dict from", "= this_milestone def __iter__(self): if self.this_milestone is not None: # for /milestone/xxx milestone", "= StringIO() writer = csv.writer(buffer) title = _(\"Hours for {user}\").format(user=user) writer.writerow([title, req.abs_href()]) writer.writerow([])", "Ticket(self.env, entry['ticket']).values.get('milestone'): continue worker_hours[worker] += entry['seconds_worked'] worker_hours = [(worker, seconds / 3600.) for", "add_ctxtnav(req, _('Hours by Query'), req.href.hours(from_date=data['from_date'], to_date=data['to_date'])) add_ctxtnav(req, _('Hours by User'), req.href.hours('user', from_date=data['from_date'], to_date=data['to_date']))", "else: add_ctxtnav(req, _('Hours by date'), req.href.hours('user', details='date', from_date=data['from_date'], to_date=data['to_date'])) add_link(req, 'alternate', req.href(req.path_info, format='csv'),", "in hours: worker = entry['worker'] if worker not in worker_hours: worker_hours[worker] = 0", "data)) add_stylesheet(req, 'common/css/report.css') if details == 'date': add_ctxtnav(req, _('Hours summary'), req.href.hours('user', from_date=data['from_date'], to_date=data['to_date']))", "now data['from_date_raw'] = from_date_raw data['from_date'] = user_time(req, format_date, from_date_raw) data['to_date_raw'] = to_date_raw data['to_date']", "add_stylesheet ) from hours import TracHoursPlugin, _ from sqlhelper import get_all_dict from utils", "total_hours = trac_hours.get_total_hours( ticket.id) / 3600.0 hours[milestone.name]['totalhours'] += total_hours # update date for", "format='csv'), 'CSV', 'text/csv', 'csv') # add_link(req, 'prev', self.get_href(query, args, context.href), # _('Prev Week'))", "return stream class MilestoneMarkup(object): \"\"\"Iterator for Transformer markup injection\"\"\" def __init__(self, req, buffer,", "in req.perm: trac_hours = TracHoursPlugin(self.env) hours = {} milestones = data.get('milestones') this_milestone =", "# Internal methods def date_data(self, req, data): \"\"\"data for the date\"\"\" now =", "= [(worker, seconds / 3600.) 
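    # How the injection above works (explanatory note, not from the original
    # source): Transformer(find_xpath).copy(b) copies the milestone heading
    # events into the StreamBuffer, so MilestoneMarkup.__iter__ can read the
    # milestone name back out of buffer.events on the /roadmap view;
    # .end().select(xpath) then re-selects the milestone's stats <div>, and
    # .append(...) injects the generated hour totals at its end. The buffer
    # is filled lazily, per milestone, as the stream is rendered.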
    class MilestoneMarkup(object):
        """Iterator for Transformer markup injection"""

        def __init__(self, req, buffer, hours, href, this_milestone):
            self.req = req
            self.buffer = buffer
            self.hours = hours
            self.href = href
            self.this_milestone = this_milestone

        def __iter__(self):
            if self.this_milestone is not None:  # for /milestone/xxx
                milestone = self.this_milestone
            else:
                milestone = self.buffer.events[3][1]
            if milestone not in self.hours.keys():
                return iter([])
            hours = self.hours[milestone]
            estimated_hours = hours['estimatedhours']
            total_hours = hours['totalhours']
            if not (estimated_hours or total_hours):
                return iter([])
            items = []
            if estimated_hours:
                if parse_version(TRAC_VERSION) < parse_version('1.0'):
                    items.append(tag.dt(_("Estimated Hours:")))
                    items.append(tag.dd(str(estimated_hours)))
                else:
                    items.append(tag.span(_("Estimated Hours: "),
                                          str(estimated_hours),
                                          class_="first interval"))
            date = hours['date']
            link = self.href("hours", milestone=milestone,
                             from_date=user_time(self.req, format_date, date))
            if parse_version(TRAC_VERSION) < parse_version('1.0'):
                items.append(tag.dt(tag.a(_("Total Hours:"), href=link)))
                items.append(
                    tag.dd(tag.a(hours_format % total_hours, href=link)))
                return iter(tag.dl(*items))
            else:
                items.append(tag.span(tag.a(_("Total Hours: "),
                                            hours_format % total_hours,
                                            href=link),
                                      class_='interval'))
                return iter(tag.p(*items, class_='legend'))


class TracUserHours(Component):

    implements(ITemplateProvider, IRequestHandler)

    # ITemplateProvider methods

    def get_htdocs_dirs(self):
        return []

    def get_templates_dirs(self):
        from pkg_resources import resource_filename
        return [resource_filename(__name__, 'templates')]

    # IRequestHandler methods

    def match_request(self, req):
        return req.path_info == '/hours/user' or \
               re.match(r'/hours/user/(?:tickets|dates)/(?:\w+)',
                        req.path_info) is not None

    def process_request(self, req):
        req.perm.require('TICKET_VIEW_HOURS')
        if req.path_info.rstrip('/') == '/hours/user':
            return self.users(req)
        m = re.match(r'/hours/user/(?P<field>\w+)/(?P<user>\w+)',
                     req.path_info)
        field = m.group('field')
        user = m.group('user')
        if field == 'tickets':
            return self.user_by_ticket(req, user)
        elif field == 'dates':
            return self.user_by_date(req, user)

    # Internal methods

    def date_data(self, req, data):
        """data for the date"""
        now = datetime.now()
        data['days'] = range(1, 32)
        data['months'] = list(enumerate(calendar.month_name))
        data['years'] = range(now.year, now.year - 10, -1)
        if 'from_date' in req.args:
            from_date_raw = user_time(req, parse_date, req.args['from_date'])
        else:
            from_date_raw = datetime(now.year, now.month, now.day)
            from_date_raw = from_date_raw - timedelta(days=7)
        if 'to_date' in req.args:
            to_date_raw = user_time(req, parse_date, req.args['to_date'])
            to_date_raw = to_date_raw + timedelta(hours=23, minutes=59,
                                                  seconds=59)
        else:
            to_date_raw = now
        data['from_date_raw'] = from_date_raw
        data['from_date'] = user_time(req, format_date, from_date_raw)
        data['to_date_raw'] = to_date_raw
        data['to_date'] = user_time(req, format_date, to_date_raw)
        data['prev_week'] = from_date_raw - timedelta(days=7)
        args = dict(req.args)
        args['from_date'] = user_time(req, format_date, data['prev_week'])
        args['to_date'] = user_time(req, format_date, from_date_raw)
        data['prev_url'] = req.href('/hours/user', **args)
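    # Worked example (explanatory note, not from the original source): with
    # no from_date/to_date arguments and "now" = 2009-06-10 15:30, date_data
    # yields from_date_raw = 2009-06-03 00:00 (midnight a week ago) and
    # to_date_raw = now. An explicit to_date is pushed forward to 23:59:59
    # so the whole final day falls inside the half-open query window
    # (time_started >= from AND time_started < to) used below.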
    def users(self, req):
        """hours for all users"""

        data = {'hours_format': hours_format}

        # date data
        self.date_data(req, data)

        # milestone data
        milestone = req.args.get('milestone')
        data['milestone'] = milestone  # export_csv() reads this key
        milestones = Milestone.select(self.env)
        data['milestones'] = milestones

        # get the hours
        # trachours = TracHoursPlugin(self.env)
        # tickets = trachours.tickets_with_hours()
        hours = get_all_dict(self.env, """
            SELECT * FROM ticket_time
            WHERE time_started >= %s AND time_started < %s
            """, *[int(time.mktime(data[i].timetuple()))
                   for i in ('from_date_raw', 'to_date_raw')])
        details = req.args.get('details')
        worker_hours = {}
        if details != 'date':
            for entry in hours:
                worker = entry['worker']
                if worker not in worker_hours:
                    worker_hours[worker] = 0
                if milestone and milestone != \
                        Ticket(self.env,
                               entry['ticket']).values.get('milestone'):
                    continue
                worker_hours[worker] += entry['seconds_worked']
            worker_hours = [(worker, seconds / 3600.)
                            for worker, seconds in
                            sorted(worker_hours.items())]
        else:
            for entry in hours:
                date = user_time(req, format_date, entry['time_started'])
                worker = entry['worker']
                key = (date, worker)
                if key not in worker_hours:
                    worker_hours[key] = 0
                if milestone and milestone != \
                        Ticket(self.env,
                               entry['ticket']).values.get('milestone'):
                    continue
                worker_hours[key] += entry['seconds_worked']
            worker_hours = [(key[0], key[1], seconds / 3600.)
                            for key, seconds in sorted(worker_hours.items())]

        data['details'] = details
        data['worker_hours'] = worker_hours
        data['total_hours'] = sum(hours[-1] for hours in worker_hours)

        if req.args.get('format') == 'csv':
            # export_csv() returns (content, mimetype), so unpack for send()
            req.send(*self.export_csv(req, data))

        add_stylesheet(req, 'common/css/report.css')
        if details == 'date':
            add_ctxtnav(req, _('Hours summary'),
                        req.href.hours('user',
                                       from_date=data['from_date'],
                                       to_date=data['to_date']))
        else:
            add_ctxtnav(req, _('Hours by date'),
                        req.href.hours('user', details='date',
                                       from_date=data['from_date'],
                                       to_date=data['to_date']))
        add_link(req, 'alternate', req.href(req.path_info, format='csv'),
                 'CSV', 'text/csv', 'csv')
        # add_link(req, 'prev', self.get_href(query, args, context.href),
        #          _('Prev Week'))
        # add_link(req, 'next', self.get_href(query, args, context.href),
        #          _('Next Week'))
        # prevnext_nav(req, _('Prev Week'), _('Next Week'))
        Chrome(self.env).add_jquery_ui(req)
        return 'hours_users.html', data, 'text/html'
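    # Shape note (explanatory, not from the original source): users() emits
    # 2-tuples (worker, hours) in summary mode and 3-tuples
    # (date, worker, hours) in details='date' mode; sum(hours[-1] ...) takes
    # the last element, so the same total works for both shapes.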
    def user_by_ticket(self, req, user):
        """hours page for a single user"""

        data = {'hours_format': hours_format,
                'worker': user}
        self.date_data(req, data)
        args = [user]
        args += [int(time.mktime(data[i].timetuple()))
                 for i in ('from_date_raw', 'to_date_raw')]
        hours = get_all_dict(self.env, """
            SELECT * FROM ticket_time
            WHERE worker=%s AND time_started >= %s AND time_started < %s
            """, *args)
        worker_hours = {}
        for entry in hours:
            ticket = entry['ticket']
            if ticket not in worker_hours:
                worker_hours[ticket] = 0
            worker_hours[ticket] += entry['seconds_worked']

        data['tickets'] = dict([(i, Ticket(self.env, i))
                                for i in worker_hours.keys()])
        # sort by ticket number and convert to hours
        worker_hours = [(ticket_id, seconds / 3600.)
                        for ticket_id, seconds in
                        sorted(worker_hours.items())]
        data['worker_hours'] = worker_hours
        data['total_hours'] = sum(hours[1] for hours in worker_hours)

        if req.args.get('format') == 'csv':
            buffer = StringIO()
            writer = csv.writer(buffer)
            title = _("Hours for {user}").format(user=user)
            writer.writerow([title, req.abs_href()])
            writer.writerow([])
            writer.writerow(['From', 'To'])
            writer.writerow([data['from_date'], data['to_date']])
            writer.writerow([])
            writer.writerow(['Ticket', 'Hours'])
            for ticket, hours in worker_hours:
                writer.writerow([ticket, hours])
            req.send(buffer.getvalue(), 'text/csv')

        add_stylesheet(req, 'common/css/report.css')
        add_ctxtnav(req, _('Hours by Query'),
                    req.href.hours(from_date=data['from_date'],
                                   to_date=data['to_date']))
        add_ctxtnav(req, _('Hours by User'),
                    req.href.hours('user', from_date=data['from_date'],
                                   to_date=data['to_date']))
        add_ctxtnav(req, _('Hours by date'),
                    req.href.hours('user/dates/{}'.format(user),
                                   from_date=data['from_date'],
                                   to_date=data['to_date']))
        add_link(req, 'alternate', req.href(req.path_info, format='csv'),
                 'CSV', 'text/csv', 'csv')
        Chrome(self.env).add_jquery_ui(req)
        return 'hours_user_by_ticket.html', data, 'text/html'
hours[milestone.name]['estimatedhours'] += estimated_hours # total", "from hours import TracHoursPlugin, _ from sqlhelper import get_all_dict from utils import hours_format", "date)) if parse_version(TRAC_VERSION) < parse_version('1.0'): items.append(tag.dt(tag.a(_(\"Total Hours:\"), href=link))) items.append( tag.dd(tag.a(hours_format % total_hours, href=link)))", "utils import hours_format class TracHoursRoadmapFilter(Component): implements(ITemplateStreamFilter) # ITemplateStreamFilter methods def filter_stream(self, req, method,", "data): \"\"\"data for the date\"\"\" now = datetime.now() data['days'] = range(1, 32) data['months']", "+= entry['seconds_worked'] data['tickets'] = dict([(i, Ticket(self.env, i)) for i in worker_hours.keys()]) # sort", "to_date=data['to_date'])) add_ctxtnav(req, _('Hours by date'), req.href.hours('user/dates/{}'.format(user), from_date=data['from_date'], to_date=data['to_date'])) add_link(req, 'alternate', req.href(req.path_info, format='csv'), 'CSV',", "data['worker_hours'] = worker_hours data['total_hours'] = sum(hours[2] for hours in worker_hours) if req.args.get('format') ==", "format_date, date)) if parse_version(TRAC_VERSION) < parse_version('1.0'): items.append(tag.dt(tag.a(_(\"Total Hours:\"), href=link))) items.append( tag.dd(tag.a(hours_format % total_hours,", "\"//*[@class='milestone']//h2/a\" xpath = \"//*[@class='milestone']/div[1]\" for milestone in milestones: hours[milestone.name] = dict(totalhours=0., estimatedhours=0., )", "prevnext_nav(req, _('Prev Week'), _('Next Week')) Chrome(self.env).add_jquery_ui(req) return 'hours_users.html', data, 'text/html' def user_by_ticket(self, req,", "by date'), req.href.hours('user/dates/{}'.format(user), from_date=data['from_date'], to_date=data['to_date'])) add_link(req, 'alternate', req.href(req.path_info, format='csv'), 'CSV', 'text/csv', 'csv') Chrome(self.env).add_jquery_ui(req)", "by ticket number and convert to hours worker_hours = [(date, details['tickets'], details['seconds'] /", "req.path_info) is not None def process_request(self, req): req.perm.require('TICKET_VIEW_HOURS') if req.path_info.rstrip('/') == '/hours/user': return", "# BOM writer = csv.writer(content, delimiter=sep, quoting=csv.QUOTE_MINIMAL) title = _(\"Hours for {project}\").format(project=self.env.project_name) writer.writerow([title,", "user_by_date(self, req, user): \"\"\"hours page for a single user\"\"\" data = {'hours_format': hours_format,", "format='csv', from_date=data['from_date'], to_date=data['to_date']), 'CSV', 'text/csv', 'csv') Chrome(self.env).add_jquery_ui(req) return 'hours_user_by_date.html', data, 'text/html' def export_csv(self,", "add_link(req, 'prev', self.get_href(query, args, context.href), # _('Prev Week')) # add_link(req, 'next', self.get_href(query, args,", "seconds / 3600.) 
for ticket_id, seconds in sorted(worker_hours.items())] data['worker_hours'] = worker_hours data['total_hours'] =", "writer.writerow([]) writer.writerow(['Ticket', 'Hours']) for date, tickets, hours in worker_hours: ids = ['#{}'.format(id) for", "items.append(tag.dt(tag.a(_(\"Total Hours:\"), href=link))) items.append( tag.dd(tag.a(hours_format % total_hours, href=link))) return iter(tag.dl(*items)) else: items.append(tag.span(tag.a(_(\"Total Hours:", "*args) worker_hours = {} for entry in hours: ticket = entry['ticket'] if ticket", "parse_version(TRAC_VERSION) < parse_version('1.0'): items.append(tag.dt(tag.a(_(\"Total Hours:\"), href=link))) items.append( tag.dd(tag.a(hours_format % total_hours, href=link))) return iter(tag.dl(*items))", "# # This software is licensed as described in the file COPYING, which", "worker_hours = {} if details != 'date': for entry in hours: worker =", "3600.) for date, details in sorted(worker_hours.items())] data['worker_hours'] = worker_hours data['total_hours'] = sum(hours[2] for", "'To']) writer.writerow([data['from_date'], data['to_date']]) writer.writerow([]) writer.writerow(['Ticket', 'Hours']) for date, tickets, hours in worker_hours: ids", "= TracHoursPlugin(self.env) hours = {} milestones = data.get('milestones') this_milestone = None if milestones", "view : only one milestone milestones = [data['milestone']] this_milestone = milestones[0].name find_xpath =", "the ticket (seconds -> hours) total_hours = trac_hours.get_total_hours( ticket.id) / 3600.0 hours[milestone.name]['totalhours'] +=", "ticket not in worker_hours[date]['tickets']: worker_hours[date]['tickets'].append(ticket) data['tickets'] = dict([(entry['ticket'], Ticket(self.env, entry['ticket'])) for entry in", "seconds in sorted(worker_hours.items())] data['worker_hours'] = worker_hours data['total_hours'] = sum(hours[1] for hours in worker_hours)", "<gh_stars>0 # -*- coding: utf-8 -*- # # Copyright (C) 2009 <NAME> <<EMAIL>>", "as described in the file COPYING, which # you should have received as", ") from hours import TracHoursPlugin, _ from sqlhelper import get_all_dict from utils import", "is not None def process_request(self, req): req.perm.require('TICKET_VIEW_HOURS') if req.path_info.rstrip('/') == '/hours/user': return self.users(req)", "seconds in sorted(worker_hours.items())] else: for entry in hours: date = user_time(req, format_date, entry['time_started'])", "which # you should have received as part of this distribution. 
# import", "entry['time_started']) worker = entry['worker'] key = (date, worker) if key not in worker_hours:", "not in worker_hours: worker_hours[key] = 0 if milestone and milestone != \\ Ticket(self.env,", "self.get_href(query, args, context.href), # _('Next Week')) # prevnext_nav(req, _('Prev Week'), _('Next Week')) Chrome(self.env).add_jquery_ui(req)", "_ from sqlhelper import get_all_dict from utils import hours_format class TracHoursRoadmapFilter(Component): implements(ITemplateStreamFilter) #", "update date for oldest ticket if ticket['time'] < hours[milestone.name]['date']: hours[milestone.name]['date'] = ticket['time'] b", "req): req.perm.require('TICKET_VIEW_HOURS') if req.path_info.rstrip('/') == '/hours/user': return self.users(req) m = re.match(r'/hours/user/(?P<field>\\w+)/(?P<user>\\w+)', req.path_info) field", "'csv': buffer = StringIO() writer = csv.writer(buffer) title = _(\"Hours for {user}\").format(user=user) writer.writerow([title,", "_(\"Hours for {user}\").format(user=user) writer.writerow([title, req.abs_href()]) writer.writerow([]) writer.writerow(['From', 'To']) writer.writerow([data['from_date'], data['to_date']]) writer.writerow([]) writer.writerow(['Ticket', 'Hours'])", "'to_date_raw')] hours = get_all_dict(self.env, \"\"\" SELECT * FROM ticket_time WHERE worker=%s AND time_started", "get_all_dict(self.env, \"\"\" SELECT * FROM ticket_time WHERE worker=%s AND time_started >= %s AND", "estimated_hours = float(ticket['estimatedhours']) except (ValueError, TypeError): estimated_hours = 0. hours[milestone.name]['estimatedhours'] += estimated_hours #", "hours self.href = href self.this_milestone = this_milestone def __iter__(self): if self.this_milestone is not", "the roadmap (/roadmap) and milestones /milestone/<milestone> \"\"\" if filename in ('roadmap.html', 'milestone_view.html') and", "= \"//div[@class='milestone']/h1\" xpath = \"//div[@class='milestone']/div[1]\" else: # /roadmap view find_xpath = \"//*[@class='milestone']//h2/a\" xpath", "self.hours[milestone] estimated_hours = hours['estimatedhours'] total_hours = hours['totalhours'] if not (estimated_hours or total_hours): return", "return iter(tag.dl(*items)) else: items.append(tag.span(tag.a(_(\"Total Hours: \"), hours_format % total_hours, href=link), class_='interval')) return iter(tag.p(*items,", "to_date_raw data['to_date'] = user_time(req, format_date, to_date_raw) data['prev_week'] = from_date_raw - timedelta(days=7) args =", "Ticket(self.env, i)) for i in worker_hours.keys()]) # sort by ticket number and convert", "writer.writerow(['Milestone', data['milestone']]) writer.writerow([]) writer.writerow(['Worker', 'Hours']) for worker, hours in data['worker_hours']: writer.writerow([worker, hours]) return", "# # Copyright (C) 2009 <NAME> <<EMAIL>> # All rights reserved. 
# #", "if milestone not in self.hours.keys(): return iter([]) hours = self.hours[milestone] estimated_hours = hours['estimatedhours']", "req.href.hours('user', from_date=data['from_date'], to_date=data['to_date'])) else: add_ctxtnav(req, _('Hours by date'), req.href.hours('user', details='date', from_date=data['from_date'], to_date=data['to_date'])) add_link(req,", "Chrome(self.env).add_jquery_ui(req) return 'hours_user_by_ticket.html', data, 'text/html' def user_by_date(self, req, user): \"\"\"hours page for a", "','.join(ids), hours]) req.send(buffer.getvalue(), 'text/csv') add_stylesheet(req, 'common/css/report.css') add_ctxtnav(req, _('Hours by Query'), req.href.hours(from_date=data['from_date'], to_date=data['to_date'])) add_ctxtnav(req,", "\"\"\"hours for all users\"\"\" data = {'hours_format': hours_format} # date data self.date_data(req, data)", "not in worker_hours: worker_hours[ticket] = 0 worker_hours[ticket] += entry['seconds_worked'] data['tickets'] = dict([(i, Ticket(self.env,", "timedelta(days=7) args = dict(req.args) args['from_date'] = user_time(req, format_date, data['prev_week']) args['to_date'] = user_time(req, format_date,", "title = _(\"Hours for {project}\").format(project=self.env.project_name) writer.writerow([title, req.abs_href()]) writer.writerow([]) writer.writerow(['From', 'To']) writer.writerow([data['from_date'], data['to_date']]) if", "b = StreamBuffer() stream |= Transformer(find_xpath).copy(b).end().select(xpath). \\ append( self.MilestoneMarkup(req, b, hours, req.href, this_milestone))", "key[1], seconds / 3600.) for key, seconds in sorted(worker_hours.items())] data['details'] = details data['worker_hours']", "= entry['ticket'] if ticket not in worker_hours: worker_hours[ticket] = 0 worker_hours[ticket] += entry['seconds_worked']", "hours, req.href, this_milestone)) return stream class MilestoneMarkup(object): \"\"\"Iterator for Transformer markup injection\"\"\" def", "add_ctxtnav(req, _('Hours by date'), req.href.hours('user', details='date', from_date=data['from_date'], to_date=data['to_date'])) add_link(req, 'alternate', req.href(req.path_info, format='csv'), 'CSV',", "from_date_raw data['from_date'] = user_time(req, format_date, from_date_raw) data['to_date_raw'] = to_date_raw data['to_date'] = user_time(req, format_date,", "if filename in ('roadmap.html', 'milestone_view.html') and \\ 'TICKET_VIEW_HOURS' in req.perm: trac_hours = TracHoursPlugin(self.env)", "= StreamBuffer() stream |= Transformer(find_xpath).copy(b).end().select(xpath). \\ append( self.MilestoneMarkup(req, b, hours, req.href, this_milestone)) return", "args, context.href), # _('Next Week')) # prevnext_nav(req, _('Prev Week'), _('Next Week')) Chrome(self.env).add_jquery_ui(req) return", "should have received as part of this distribution. 
# import re import calendar", "== 'date': add_ctxtnav(req, _('Hours summary'), req.href.hours('user', from_date=data['from_date'], to_date=data['to_date'])) else: add_ctxtnav(req, _('Hours by date'),", "\"\"\" SELECT * FROM ticket_time WHERE worker=%s AND time_started >= %s AND time_started", "key, seconds in sorted(worker_hours.items())] data['details'] = details data['worker_hours'] = worker_hours data['total_hours'] = sum(hours[-1]", "if milestone and milestone != \\ Ticket(self.env, entry['ticket']).values.get('milestone'): continue worker_hours[key] += entry['seconds_worked'] worker_hours", "id in tickets] writer.writerow([date, ','.join(ids), hours]) req.send(buffer.getvalue(), 'text/csv') add_stylesheet(req, 'common/css/report.css') add_ctxtnav(req, _('Hours by", "not None def process_request(self, req): req.perm.require('TICKET_VIEW_HOURS') if req.path_info.rstrip('/') == '/hours/user': return self.users(req) m", "= user_time(req, format_date, to_date_raw) data['prev_week'] = from_date_raw - timedelta(days=7) args = dict(req.args) args['from_date']", "-> hours) total_hours = trac_hours.get_total_hours( ticket.id) / 3600.0 hours[milestone.name]['totalhours'] += total_hours # update", "%s AND time_started < %s \"\"\", *args) worker_hours = {} for entry in", "= m.group('user') if field == 'tickets': return self.user_by_ticket(req, user) elif field == 'dates':", "{'hours_format': hours_format, 'worker': user} self.date_data(req, data) args = [user] args += [int(time.mktime(data[i].timetuple())) for", "csv.writer(buffer) title = _(\"Hours for {user}\").format(user=user) writer.writerow([title, req.abs_href()]) writer.writerow([]) writer.writerow(['From', 'To']) writer.writerow([data['from_date'], data['to_date']])", "} worker_hours[date]['seconds'] += entry['seconds_worked'] if ticket not in worker_hours[date]['tickets']: worker_hours[date]['tickets'].append(ticket) data['tickets'] = dict([(entry['ticket'],", "for date, tickets, hours in worker_hours: ids = ['#{}'.format(id) for id in tickets]", "stream, data): \"\"\" filter the stream for the roadmap (/roadmap) and milestones /milestone/<milestone>", "data['to_date'] = user_time(req, format_date, to_date_raw) data['prev_week'] = from_date_raw - timedelta(days=7) args = dict(req.args)", "hours = get_all_dict(self.env, \"\"\" SELECT * FROM ticket_time WHERE time_started >= %s AND", "find_xpath = \"//*[@class='milestone']//h2/a\" xpath = \"//*[@class='milestone']/div[1]\" for milestone in milestones: hours[milestone.name] = dict(totalhours=0.,", "[(date, details['tickets'], details['seconds'] / 3600.) for date, details in sorted(worker_hours.items())] data['worker_hours'] = worker_hours", "writer.writerow([ticket, hours]) req.send(buffer.getvalue(), 'text/csv') add_stylesheet(req, 'common/css/report.css') add_ctxtnav(req, _('Hours by Query'), req.href.hours(from_date=data['from_date'], to_date=data['to_date'])) add_ctxtnav(req,", "add_ctxtnav(req, _('Hours by User'), req.href.hours('user', from_date=data['from_date'], to_date=data['to_date'])) add_ctxtnav(req, _('Hours by date'), req.href.hours('user/dates/{}'.format(user), from_date=data['from_date'],", "the ticket try: estimated_hours = float(ticket['estimatedhours']) except (ValueError, TypeError): estimated_hours = 0. 
hours[milestone.name]['estimatedhours']", "return iter([]) hours = self.hours[milestone] estimated_hours = hours['estimatedhours'] total_hours = hours['totalhours'] if not", "< parse_version('1.0'): items.append(tag.dt(_(\"Estimated Hours:\"))) items.append(tag.dd(str(estimated_hours))) else: items.append(tag.span(_(\"Estimated Hours: \"), str(estimated_hours), class_=\"first interval\")) date", "= self.this_milestone else: milestone = self.buffer.events[3][1] if milestone not in self.hours.keys(): return iter([])", "from_date_raw - timedelta(days=7) if 'to_date' in req.args: to_date_raw = user_time(req, parse_date, req.args['to_date']) to_date_raw", "import _ from trac.web.api import IRequestHandler, ITemplateStreamFilter from trac.web.chrome import ( Chrome, ITemplateProvider,", "_('Next Week')) Chrome(self.env).add_jquery_ui(req) return 'hours_users.html', data, 'text/html' def user_by_ticket(self, req, user): \"\"\"hours page", "args += [int(time.mktime(data[i].timetuple())) for i in ('from_date_raw', 'to_date_raw')] hours = get_all_dict(self.env, \"\"\" SELECT", "{'hours_format': hours_format} # date data self.date_data(req, data) # milestone data milestone = req.args.get('milestone')", "user_time(req, format_date, data['prev_week']) args['to_date'] = user_time(req, format_date, from_date_raw) data['prev_url'] = req.href('/hours/user', **args) def", "parse_date, req.args['to_date']) to_date_raw = to_date_raw + timedelta(hours=23, minutes=59, seconds=59) else: to_date_raw = now", "0 if milestone and milestone != \\ Ticket(self.env, entry['ticket']).values.get('milestone'): continue worker_hours[worker] += entry['seconds_worked']", "= user_time(req, parse_date, req.args['to_date']) to_date_raw = to_date_raw + timedelta(hours=23, minutes=59, seconds=59) else: to_date_raw", "!= \\ Ticket(self.env, entry['ticket']).values.get('milestone'): continue worker_hours[worker] += entry['seconds_worked'] worker_hours = [(worker, seconds /", "req.perm: trac_hours = TracHoursPlugin(self.env) hours = {} milestones = data.get('milestones') this_milestone = None", "hours for the ticket try: estimated_hours = float(ticket['estimatedhours']) except (ValueError, TypeError): estimated_hours =", "import IRequestHandler, ITemplateStreamFilter from trac.web.chrome import ( Chrome, ITemplateProvider, add_ctxtnav, add_link, add_stylesheet )", "hours for the ticket (seconds -> hours) total_hours = trac_hours.get_total_hours( ticket.id) / 3600.0", "else: milestone = self.buffer.events[3][1] if milestone not in self.hours.keys(): return iter([]) hours =", "ITemplateStreamFilter from trac.web.chrome import ( Chrome, ITemplateProvider, add_ctxtnav, add_link, add_stylesheet ) from hours", "re.match(r'/hours/user/(?:tickets|dates)/(?:\\w+)', req.path_info) is not None def process_request(self, req): req.perm.require('TICKET_VIEW_HOURS') if req.path_info.rstrip('/') == '/hours/user':", "writer.writerow([data['from_date'], data['to_date']]) writer.writerow([]) writer.writerow(['Ticket', 'Hours']) for ticket, hours in worker_hours: writer.writerow([ticket, hours]) req.send(buffer.getvalue(),", "ids = ['#{}'.format(id) for id in tickets] writer.writerow([date, ','.join(ids), hours]) req.send(buffer.getvalue(), 'text/csv') add_stylesheet(req,", "to hours worker_hours = [(date, details['tickets'], details['seconds'] / 3600.) 
for date, details in", "sqlhelper import get_all_dict from utils import hours_format class TracHoursRoadmapFilter(Component): implements(ITemplateStreamFilter) # ITemplateStreamFilter methods", "in worker_hours: writer.writerow([ticket, hours]) req.send(buffer.getvalue(), 'text/csv') add_stylesheet(req, 'common/css/report.css') add_ctxtnav(req, _('Hours by Query'), req.href.hours(from_date=data['from_date'],", "req.args['to_date']) to_date_raw = to_date_raw + timedelta(hours=23, minutes=59, seconds=59) else: to_date_raw = now data['from_date_raw']", "add_stylesheet(req, 'common/css/report.css') add_ctxtnav(req, _('Hours by Query'), req.href.hours(from_date=data['from_date'], to_date=data['to_date'])) add_ctxtnav(req, _('Hours by User'), req.href.hours('user',", "i in worker_hours.keys()]) # sort by ticket number and convert to hours worker_hours", "estimated_hours = hours['estimatedhours'] total_hours = hours['totalhours'] if not (estimated_hours or total_hours): return iter([])", "class_='interval')) return iter(tag.p(*items, class_='legend')) class TracUserHours(Component): implements(ITemplateProvider, IRequestHandler) # ITemplateProvider methods def get_htdocs_dirs(self):", "'To']) writer.writerow([data['from_date'], data['to_date']]) writer.writerow([]) writer.writerow(['Ticket', 'Hours']) for ticket, hours in worker_hours: writer.writerow([ticket, hours])", "= worker_hours data['total_hours'] = sum(hours[1] for hours in worker_hours) if req.args.get('format') == 'csv':", "this_milestone): self.req = req self.buffer = buffer self.hours = hours self.href = href", "data['prev_url'] = req.href('/hours/user', **args) def users(self, req): \"\"\"hours for all users\"\"\" data =", "req.path_info) field = m.group('field') user = m.group('user') if field == 'tickets': return self.user_by_ticket(req,", "worker_hours[key] += entry['seconds_worked'] worker_hours = [(key[0], key[1], seconds / 3600.) for key, seconds", ">= %s AND time_started < %s \"\"\", *[int(time.mktime(data[i].timetuple())) for i in ('from_date_raw', 'to_date_raw')])", "writer.writerow([data['from_date'], data['to_date']]) writer.writerow([]) writer.writerow(['Ticket', 'Hours']) for date, tickets, hours in worker_hours: ids =", "this_milestone def __iter__(self): if self.this_milestone is not None: # for /milestone/xxx milestone =", "if estimated_hours: if parse_version(TRAC_VERSION) < parse_version('1.0'): items.append(tag.dt(_(\"Estimated Hours:\"))) items.append(tag.dd(str(estimated_hours))) else: items.append(tag.span(_(\"Estimated Hours: \"),", "as TRAC_VERSION from trac.core import * from trac.ticket import Ticket from trac.ticket.model import", "= data.get('milestones') this_milestone = None if milestones is None: # /milestone view :", "trachours = TracHoursPlugin(self.env) # tickets = trachours.tickets_with_hours() hours = get_all_dict(self.env, \"\"\" SELECT *", "entry in hours: ticket = entry['ticket'] if ticket not in worker_hours: worker_hours[ticket] =", "User'), req.href.hours('user', from_date=data['from_date'], to_date=data['to_date'])) add_ctxtnav(req, _('Hours by date'), req.href.hours('user/dates/{}'.format(user), from_date=data['from_date'], to_date=data['to_date'])) add_link(req, 'alternate',", "+= total_hours # update date for oldest ticket if ticket['time'] < hours[milestone.name]['date']: hours[milestone.name]['date']", "Copyright (C) 2009 <NAME> <<EMAIL>> # All rights reserved. # # This software", "number and convert to hours worker_hours = [(ticket_id, seconds / 3600.) 
for ticket_id,", "worker_hours data['total_hours'] = sum(hours[-1] for hours in worker_hours) if req.args.get('format') == 'csv': req.send(self.export_csv(req,", "user} self.date_data(req, data) args = [user] args += [int(time.mktime(data[i].timetuple())) for i in ('from_date_raw',", "= { 'seconds': 0, 'tickets': [], } worker_hours[date]['seconds'] += entry['seconds_worked'] if ticket not", "req.href(req.path_info, format='csv', from_date=data['from_date'], to_date=data['to_date']), 'CSV', 'text/csv', 'csv') Chrome(self.env).add_jquery_ui(req) return 'hours_user_by_date.html', data, 'text/html' def", "else: items.append(tag.span(_(\"Estimated Hours: \"), str(estimated_hours), class_=\"first interval\")) date = hours['date'] link = self.href(\"hours\",", "'date': add_ctxtnav(req, _('Hours summary'), req.href.hours('user', from_date=data['from_date'], to_date=data['to_date'])) else: add_ctxtnav(req, _('Hours by date'), req.href.hours('user',", "= dict(totalhours=0., estimatedhours=0., ) tickets = [tid for tid, in self.env.db_query(\"\"\" SELECT id", "for the ticket (seconds -> hours) total_hours = trac_hours.get_total_hours( ticket.id) / 3600.0 hours[milestone.name]['totalhours']", "hours[milestone.name]['date']: hours[milestone.name]['date'] = ticket['time'] b = StreamBuffer() stream |= Transformer(find_xpath).copy(b).end().select(xpath). \\ append( self.MilestoneMarkup(req,", "= datetime.now() data['days'] = range(1, 32) data['months'] = list(enumerate(calendar.month_name)) data['years'] = range(now.year, now.year", "mimetype='text/csv'): content = StringIO() content.write('\\xef\\xbb\\xbf') # BOM writer = csv.writer(content, delimiter=sep, quoting=csv.QUOTE_MINIMAL) title", "'To']) writer.writerow([data['from_date'], data['to_date']]) if data['milestone']: writer.writerow(['Milestone', data['milestone']]) writer.writerow([]) writer.writerow(['Worker', 'Hours']) for worker, hours", "user = m.group('user') if field == 'tickets': return self.user_by_ticket(req, user) elif field ==", "# tickets = trachours.tickets_with_hours() hours = get_all_dict(self.env, \"\"\" SELECT * FROM ticket_time WHERE", "entry['seconds_worked'] data['tickets'] = dict([(i, Ticket(self.env, i)) for i in worker_hours.keys()]) # sort by", "in the file COPYING, which # you should have received as part of", "= user_time(req, format_date, data['prev_week']) args['to_date'] = user_time(req, format_date, from_date_raw) data['prev_url'] = req.href('/hours/user', **args)", "= (date, worker) if key not in worker_hours: worker_hours[key] = 0 if milestone", "worker_hours) if req.args.get('format') == 'csv': req.send(self.export_csv(req, data)) add_stylesheet(req, 'common/css/report.css') if details == 'date':", "# All rights reserved. 
# # This software is licensed as described in", "from_date=data['from_date'], to_date=data['to_date'])) else: add_ctxtnav(req, _('Hours by date'), req.href.hours('user', details='date', from_date=data['from_date'], to_date=data['to_date'])) add_link(req, 'alternate',", "estimated hours for the ticket try: estimated_hours = float(ticket['estimatedhours']) except (ValueError, TypeError): estimated_hours", "milestone = req.args.get('milestone') milestones = Milestone.select(self.env) data['milestones'] = milestones # get the hours", "if 'from_date' in req.args: from_date_raw = user_time(req, parse_date, req.args['from_date']) else: from_date_raw = datetime(now.year,", "details='date', from_date=data['from_date'], to_date=data['to_date'])) add_link(req, 'alternate', req.href(req.path_info, format='csv'), 'CSV', 'text/csv', 'csv') # add_link(req, 'prev',", "trac.util.html import html as tag from trac.util.translation import _ from trac.web.api import IRequestHandler,", "in tickets: ticket = Ticket(self.env, ticket) # estimated hours for the ticket try:", "None def process_request(self, req): req.perm.require('TICKET_VIEW_HOURS') if req.path_info.rstrip('/') == '/hours/user': return self.users(req) m =", "worker_hours data['total_hours'] = sum(hours[1] for hours in worker_hours) if req.args.get('format') == 'csv': buffer", "req.href.hours('user/dates/{}'.format(user), from_date=data['from_date'], to_date=data['to_date'])) add_link(req, 'alternate', req.href(req.path_info, format='csv'), 'CSV', 'text/csv', 'csv') Chrome(self.env).add_jquery_ui(req) return 'hours_user_by_ticket.html',", "datetime.now() data['days'] = range(1, 32) data['months'] = list(enumerate(calendar.month_name)) data['years'] = range(now.year, now.year -", "i)) for i in worker_hours.keys()]) # sort by ticket number and convert to", "csv import time from StringIO import StringIO from datetime import datetime, timedelta from", "ticket_time WHERE time_started >= %s AND time_started < %s \"\"\", *[int(time.mktime(data[i].timetuple())) for i", "args['from_date'] = user_time(req, format_date, data['prev_week']) args['to_date'] = user_time(req, format_date, from_date_raw) data['prev_url'] = req.href('/hours/user',", "add_link(req, 'alternate', req.href(req.path_info, format='csv'), 'CSV', 'text/csv', 'csv') # add_link(req, 'prev', self.get_href(query, args, context.href),", "timedelta(hours=23, minutes=59, seconds=59) else: to_date_raw = now data['from_date_raw'] = from_date_raw data['from_date'] = user_time(req,", "def users(self, req): \"\"\"hours for all users\"\"\" data = {'hours_format': hours_format} # date", "= hours['totalhours'] if not (estimated_hours or total_hours): return iter([]) items = [] if", "return 'hours_users.html', data, 'text/html' def user_by_ticket(self, req, user): \"\"\"hours page for a single", "time_started < %s \"\"\", *args) worker_hours = {} for entry in hours: ticket", "total_hours = hours['totalhours'] if not (estimated_hours or total_hours): return iter([]) items = []", "total_hours, href=link), class_='interval')) return iter(tag.p(*items, class_='legend')) class TracUserHours(Component): implements(ITemplateProvider, IRequestHandler) # ITemplateProvider methods", "data) # milestone data milestone = req.args.get('milestone') milestones = Milestone.select(self.env) data['milestones'] = milestones", "users(self, req): \"\"\"hours for all users\"\"\" data = {'hours_format': hours_format} # date data", "data['worker_hours'] = worker_hours data['total_hours'] = sum(hours[-1] for hours in worker_hours) 
if req.args.get('format') ==", "req.href.hours(from_date=data['from_date'], to_date=data['to_date'])) add_ctxtnav(req, _('Hours by User'), req.href.hours('user', from_date=data['from_date'], to_date=data['to_date'])) add_ctxtnav(req, _('Hours by date'),", "sort by ticket number and convert to hours worker_hours = [(date, details['tickets'], details['seconds']", "import Ticket from trac.ticket.model import Milestone from trac.util.datefmt import format_date, parse_date, user_time from", "key not in worker_hours: worker_hours[key] = 0 if milestone and milestone != \\", "= {} for entry in hours: date = user_time(req, format_date, entry['time_started']) ticket =" ]
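# The aggregation step in users() above is the core of the report: ticket_time
# rows carry seconds_worked, which are bucketed per worker (or per
# (date, worker) pair when details == 'date') and only afterwards converted
# to hours. A minimal standalone sketch of that step follows; the sample rows
# and the summarize() helper are hypothetical and not part of the plugin.

def summarize(entries, by_date=False):
    totals = {}
    for entry in entries:
        key = (entry['date'], entry['worker']) if by_date else entry['worker']
        totals[key] = totals.get(key, 0) + entry['seconds_worked']
    # seconds -> hours, sorted, matching the plugin's list-of-tuples shape
    return [(key, seconds / 3600.) for key, seconds in sorted(totals.items())]


if __name__ == '__main__':
    entries = [
        {'worker': 'alice', 'date': '2009-01-05', 'seconds_worked': 5400},
        {'worker': 'alice', 'date': '2009-01-06', 'seconds_worked': 1800},
        {'worker': 'bob', 'date': '2009-01-05', 'seconds_worked': 3600},
    ]
    print(summarize(entries))                # [('alice', 2.0), ('bob', 1.0)]
    print(summarize(entries, by_date=True))  # one row per (date, worker)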
[ "GeneralConfigurationEnvelope from running_modes.configurations.transfer_learning.link_invent_learning_rate_configuration import \\ LinkInventLearningRateConfiguration from running_modes.configurations.transfer_learning.link_invent_transfer_learning_configuration import \\ LinkInventTransferLearningConfiguration from running_modes.constructors.transfer_learning_mode_constructor", "SMILES_SET_LINK_INVENT_PATH, LINK_INVENT_PRIOR_PATH from unittest_reinvent.fixtures.utils import count_empty_files class TestLinkInventTransferLearning(unittest.TestCase): def setUp(self): set_default_device_cuda() lm_enum =", "lm_enum = LoggingModeEnum() rm_enum = RunningModeEnum() mt_enum = ModelTypeEnum() self.workfolder = os.path.join(MAIN_TEST_PATH, mt_enum.LINK_INVENT", "= LoggingModeEnum() rm_enum = RunningModeEnum() mt_enum = ModelTypeEnum() self.workfolder = os.path.join(MAIN_TEST_PATH, mt_enum.LINK_INVENT +", "rm_enum = RunningModeEnum() mt_enum = ModelTypeEnum() self.workfolder = os.path.join(MAIN_TEST_PATH, mt_enum.LINK_INVENT + rm_enum.TRANSFER_LEARNING) if", "unittest_reinvent.fixtures.utils import count_empty_files class TestLinkInventTransferLearning(unittest.TestCase): def setUp(self): set_default_device_cuda() lm_enum = LoggingModeEnum() rm_enum =", "import unittest import os from running_modes.configurations import TransferLearningLoggerConfig, GeneralConfigurationEnvelope from running_modes.configurations.transfer_learning.link_invent_learning_rate_configuration import \\", "RunningModeEnum() mt_enum = ModelTypeEnum() self.workfolder = os.path.join(MAIN_TEST_PATH, mt_enum.LINK_INVENT + rm_enum.TRANSFER_LEARNING) if not os.path.isdir(self.workfolder):", "reinvent_models.model_factory.enums.model_type_enum import ModelTypeEnum from unittest_reinvent.fixtures.paths import MAIN_TEST_PATH, SMILES_SET_LINK_INVENT_PATH, LINK_INVENT_PRIOR_PATH from unittest_reinvent.fixtures.utils import count_empty_files", "not os.path.isdir(self.workfolder): os.makedirs(self.workfolder) self.log_dir = os.path.join(self.workfolder, \"test_log\") log_config = TransferLearningLoggerConfig(logging_path=self.log_dir, recipient=lm_enum.LOCAL, job_name=\"test_job\") self.lr_config", "MAIN_TEST_PATH, SMILES_SET_LINK_INVENT_PATH, LINK_INVENT_PRIOR_PATH from unittest_reinvent.fixtures.utils import count_empty_files class TestLinkInventTransferLearning(unittest.TestCase): def setUp(self): set_default_device_cuda() lm_enum", "= RunningModeEnum() mt_enum = ModelTypeEnum() self.workfolder = os.path.join(MAIN_TEST_PATH, mt_enum.LINK_INVENT + rm_enum.TRANSFER_LEARNING) if not", "mt_enum.LINK_INVENT + rm_enum.TRANSFER_LEARNING) if not os.path.isdir(self.workfolder): os.makedirs(self.workfolder) self.log_dir = os.path.join(self.workfolder, \"test_log\") log_config =", "LINK_INVENT_PRIOR_PATH from unittest_reinvent.fixtures.utils import count_empty_files class TestLinkInventTransferLearning(unittest.TestCase): def setUp(self): set_default_device_cuda() lm_enum = LoggingModeEnum()", "running_modes.enums.logging_mode_enum import LoggingModeEnum from running_modes.enums.running_mode_enum import RunningModeEnum from reinvent_models.model_factory.enums.model_type_enum import ModelTypeEnum from unittest_reinvent.fixtures.paths", "set_default_device_cuda from running_modes.enums.logging_mode_enum import LoggingModeEnum from running_modes.enums.running_mode_enum import RunningModeEnum from reinvent_models.model_factory.enums.model_type_enum import ModelTypeEnum", "unittest_reinvent.fixtures.paths import MAIN_TEST_PATH, 
SMILES_SET_LINK_INVENT_PATH, LINK_INVENT_PRIOR_PATH from unittest_reinvent.fixtures.utils import count_empty_files class TestLinkInventTransferLearning(unittest.TestCase): def setUp(self):", "def _model_saved_and_logs_exist(self): self.assertTrue(os.path.isfile(os.path.join(self.workfolder, self.parameters.model_file_name))) self.assertTrue(os.path.isdir(self.log_dir)) self.assertEqual(count_empty_files(self.log_dir), 0) def test_no_validation(self): self.parameters.validation_smiles_path = None self.runner.run()", "LinkInventLearningRateConfiguration() self.parameters = LinkInventTransferLearningConfiguration(empty_model=LINK_INVENT_PRIOR_PATH, output_path=self.workfolder, input_smiles_path=SMILES_SET_LINK_INVENT_PATH, validation_smiles_path=None, num_epochs=2, sample_size=10, learning_rate=self.lr_config) self.general_config = GeneralConfigurationEnvelope(model_type=mt_enum.LINK_INVENT,", "def setUp(self): set_default_device_cuda() lm_enum = LoggingModeEnum() rm_enum = RunningModeEnum() mt_enum = ModelTypeEnum() self.workfolder", "running_modes.utils import set_default_device_cuda from running_modes.enums.logging_mode_enum import LoggingModeEnum from running_modes.enums.running_mode_enum import RunningModeEnum from reinvent_models.model_factory.enums.model_type_enum", "import MAIN_TEST_PATH, SMILES_SET_LINK_INVENT_PATH, LINK_INVENT_PRIOR_PATH from unittest_reinvent.fixtures.utils import count_empty_files class TestLinkInventTransferLearning(unittest.TestCase): def setUp(self): set_default_device_cuda()", "= TransferLearningModeConstructor(self.general_config) def tearDown(self): if os.path.isdir(self.workfolder): shutil.rmtree(self.workfolder) def _model_saved_and_logs_exist(self): self.assertTrue(os.path.isfile(os.path.join(self.workfolder, self.parameters.model_file_name))) self.assertTrue(os.path.isdir(self.log_dir)) self.assertEqual(count_empty_files(self.log_dir),", "def tearDown(self): if os.path.isdir(self.workfolder): shutil.rmtree(self.workfolder) def _model_saved_and_logs_exist(self): self.assertTrue(os.path.isfile(os.path.join(self.workfolder, self.parameters.model_file_name))) self.assertTrue(os.path.isdir(self.log_dir)) self.assertEqual(count_empty_files(self.log_dir), 0) def", "test_no_validation(self): self.parameters.validation_smiles_path = None self.runner.run() self._model_saved_and_logs_exist() def test_with_validation(self): self.parameters.validation_smiles_path = SMILES_SET_LINK_INVENT_PATH self.runner.run() self._model_saved_and_logs_exist()", "shutil import unittest import os from running_modes.configurations import TransferLearningLoggerConfig, GeneralConfigurationEnvelope from running_modes.configurations.transfer_learning.link_invent_learning_rate_configuration import", "from running_modes.configurations.transfer_learning.link_invent_learning_rate_configuration import \\ LinkInventLearningRateConfiguration from running_modes.configurations.transfer_learning.link_invent_transfer_learning_configuration import \\ LinkInventTransferLearningConfiguration from running_modes.constructors.transfer_learning_mode_constructor import", "RunningModeEnum from reinvent_models.model_factory.enums.model_type_enum import ModelTypeEnum from unittest_reinvent.fixtures.paths import MAIN_TEST_PATH, SMILES_SET_LINK_INVENT_PATH, LINK_INVENT_PRIOR_PATH from unittest_reinvent.fixtures.utils", "version=\"3.0\", parameters=vars(self.parameters)) self.runner = TransferLearningModeConstructor(self.general_config) def tearDown(self): if os.path.isdir(self.workfolder): 
shutil.rmtree(self.workfolder) def _model_saved_and_logs_exist(self): self.assertTrue(os.path.isfile(os.path.join(self.workfolder,", "= os.path.join(MAIN_TEST_PATH, mt_enum.LINK_INVENT + rm_enum.TRANSFER_LEARNING) if not os.path.isdir(self.workfolder): os.makedirs(self.workfolder) self.log_dir = os.path.join(self.workfolder, \"test_log\")", "num_epochs=2, sample_size=10, learning_rate=self.lr_config) self.general_config = GeneralConfigurationEnvelope(model_type=mt_enum.LINK_INVENT, logging=vars(log_config), run_type=rm_enum.TRANSFER_LEARNING, version=\"3.0\", parameters=vars(self.parameters)) self.runner = TransferLearningModeConstructor(self.general_config)", "running_modes.enums.running_mode_enum import RunningModeEnum from reinvent_models.model_factory.enums.model_type_enum import ModelTypeEnum from unittest_reinvent.fixtures.paths import MAIN_TEST_PATH, SMILES_SET_LINK_INVENT_PATH, LINK_INVENT_PRIOR_PATH", "= ModelTypeEnum() self.workfolder = os.path.join(MAIN_TEST_PATH, mt_enum.LINK_INVENT + rm_enum.TRANSFER_LEARNING) if not os.path.isdir(self.workfolder): os.makedirs(self.workfolder) self.log_dir", "if not os.path.isdir(self.workfolder): os.makedirs(self.workfolder) self.log_dir = os.path.join(self.workfolder, \"test_log\") log_config = TransferLearningLoggerConfig(logging_path=self.log_dir, recipient=lm_enum.LOCAL, job_name=\"test_job\")", "os.path.join(MAIN_TEST_PATH, mt_enum.LINK_INVENT + rm_enum.TRANSFER_LEARNING) if not os.path.isdir(self.workfolder): os.makedirs(self.workfolder) self.log_dir = os.path.join(self.workfolder, \"test_log\") log_config", "from running_modes.utils import set_default_device_cuda from running_modes.enums.logging_mode_enum import LoggingModeEnum from running_modes.enums.running_mode_enum import RunningModeEnum from", "from running_modes.constructors.transfer_learning_mode_constructor import TransferLearningModeConstructor from running_modes.utils import set_default_device_cuda from running_modes.enums.logging_mode_enum import LoggingModeEnum from", "self.assertEqual(count_empty_files(self.log_dir), 0) def test_no_validation(self): self.parameters.validation_smiles_path = None self.runner.run() self._model_saved_and_logs_exist() def test_with_validation(self): self.parameters.validation_smiles_path =", "<gh_stars>100-1000 import shutil import unittest import os from running_modes.configurations import TransferLearningLoggerConfig, GeneralConfigurationEnvelope from", "LoggingModeEnum() rm_enum = RunningModeEnum() mt_enum = ModelTypeEnum() self.workfolder = os.path.join(MAIN_TEST_PATH, mt_enum.LINK_INVENT + rm_enum.TRANSFER_LEARNING)", "self.parameters.model_file_name))) self.assertTrue(os.path.isdir(self.log_dir)) self.assertEqual(count_empty_files(self.log_dir), 0) def test_no_validation(self): self.parameters.validation_smiles_path = None self.runner.run() self._model_saved_and_logs_exist() def test_with_validation(self):", "sample_size=10, learning_rate=self.lr_config) self.general_config = GeneralConfigurationEnvelope(model_type=mt_enum.LINK_INVENT, logging=vars(log_config), run_type=rm_enum.TRANSFER_LEARNING, version=\"3.0\", parameters=vars(self.parameters)) self.runner = TransferLearningModeConstructor(self.general_config) def", "LinkInventLearningRateConfiguration from running_modes.configurations.transfer_learning.link_invent_transfer_learning_configuration import \\ LinkInventTransferLearningConfiguration from running_modes.constructors.transfer_learning_mode_constructor import TransferLearningModeConstructor from running_modes.utils import", 
"validation_smiles_path=None, num_epochs=2, sample_size=10, learning_rate=self.lr_config) self.general_config = GeneralConfigurationEnvelope(model_type=mt_enum.LINK_INVENT, logging=vars(log_config), run_type=rm_enum.TRANSFER_LEARNING, version=\"3.0\", parameters=vars(self.parameters)) self.runner =", "+ rm_enum.TRANSFER_LEARNING) if not os.path.isdir(self.workfolder): os.makedirs(self.workfolder) self.log_dir = os.path.join(self.workfolder, \"test_log\") log_config = TransferLearningLoggerConfig(logging_path=self.log_dir,", "= GeneralConfigurationEnvelope(model_type=mt_enum.LINK_INVENT, logging=vars(log_config), run_type=rm_enum.TRANSFER_LEARNING, version=\"3.0\", parameters=vars(self.parameters)) self.runner = TransferLearningModeConstructor(self.general_config) def tearDown(self): if os.path.isdir(self.workfolder):", "output_path=self.workfolder, input_smiles_path=SMILES_SET_LINK_INVENT_PATH, validation_smiles_path=None, num_epochs=2, sample_size=10, learning_rate=self.lr_config) self.general_config = GeneralConfigurationEnvelope(model_type=mt_enum.LINK_INVENT, logging=vars(log_config), run_type=rm_enum.TRANSFER_LEARNING, version=\"3.0\", parameters=vars(self.parameters))", "TransferLearningLoggerConfig, GeneralConfigurationEnvelope from running_modes.configurations.transfer_learning.link_invent_learning_rate_configuration import \\ LinkInventLearningRateConfiguration from running_modes.configurations.transfer_learning.link_invent_transfer_learning_configuration import \\ LinkInventTransferLearningConfiguration from", "TransferLearningLoggerConfig(logging_path=self.log_dir, recipient=lm_enum.LOCAL, job_name=\"test_job\") self.lr_config = LinkInventLearningRateConfiguration() self.parameters = LinkInventTransferLearningConfiguration(empty_model=LINK_INVENT_PRIOR_PATH, output_path=self.workfolder, input_smiles_path=SMILES_SET_LINK_INVENT_PATH, validation_smiles_path=None, num_epochs=2,", "TestLinkInventTransferLearning(unittest.TestCase): def setUp(self): set_default_device_cuda() lm_enum = LoggingModeEnum() rm_enum = RunningModeEnum() mt_enum = ModelTypeEnum()", "tearDown(self): if os.path.isdir(self.workfolder): shutil.rmtree(self.workfolder) def _model_saved_and_logs_exist(self): self.assertTrue(os.path.isfile(os.path.join(self.workfolder, self.parameters.model_file_name))) self.assertTrue(os.path.isdir(self.log_dir)) self.assertEqual(count_empty_files(self.log_dir), 0) def test_no_validation(self):", "import os from running_modes.configurations import TransferLearningLoggerConfig, GeneralConfigurationEnvelope from running_modes.configurations.transfer_learning.link_invent_learning_rate_configuration import \\ LinkInventLearningRateConfiguration from", "count_empty_files class TestLinkInventTransferLearning(unittest.TestCase): def setUp(self): set_default_device_cuda() lm_enum = LoggingModeEnum() rm_enum = RunningModeEnum() mt_enum", "self.log_dir = os.path.join(self.workfolder, \"test_log\") log_config = TransferLearningLoggerConfig(logging_path=self.log_dir, recipient=lm_enum.LOCAL, job_name=\"test_job\") self.lr_config = LinkInventLearningRateConfiguration() self.parameters", "import TransferLearningModeConstructor from running_modes.utils import set_default_device_cuda from running_modes.enums.logging_mode_enum import LoggingModeEnum from running_modes.enums.running_mode_enum import", "self.assertTrue(os.path.isdir(self.log_dir)) self.assertEqual(count_empty_files(self.log_dir), 0) def test_no_validation(self): self.parameters.validation_smiles_path = None self.runner.run() 
self._model_saved_and_logs_exist() def test_with_validation(self): self.parameters.validation_smiles_path", "TransferLearningModeConstructor from running_modes.utils import set_default_device_cuda from running_modes.enums.logging_mode_enum import LoggingModeEnum from running_modes.enums.running_mode_enum import RunningModeEnum", "from running_modes.enums.running_mode_enum import RunningModeEnum from reinvent_models.model_factory.enums.model_type_enum import ModelTypeEnum from unittest_reinvent.fixtures.paths import MAIN_TEST_PATH, SMILES_SET_LINK_INVENT_PATH,", "GeneralConfigurationEnvelope(model_type=mt_enum.LINK_INVENT, logging=vars(log_config), run_type=rm_enum.TRANSFER_LEARNING, version=\"3.0\", parameters=vars(self.parameters)) self.runner = TransferLearningModeConstructor(self.general_config) def tearDown(self): if os.path.isdir(self.workfolder): shutil.rmtree(self.workfolder)", "learning_rate=self.lr_config) self.general_config = GeneralConfigurationEnvelope(model_type=mt_enum.LINK_INVENT, logging=vars(log_config), run_type=rm_enum.TRANSFER_LEARNING, version=\"3.0\", parameters=vars(self.parameters)) self.runner = TransferLearningModeConstructor(self.general_config) def tearDown(self):", "from running_modes.configurations.transfer_learning.link_invent_transfer_learning_configuration import \\ LinkInventTransferLearningConfiguration from running_modes.constructors.transfer_learning_mode_constructor import TransferLearningModeConstructor from running_modes.utils import set_default_device_cuda", "LinkInventTransferLearningConfiguration from running_modes.constructors.transfer_learning_mode_constructor import TransferLearningModeConstructor from running_modes.utils import set_default_device_cuda from running_modes.enums.logging_mode_enum import LoggingModeEnum", "ModelTypeEnum() self.workfolder = os.path.join(MAIN_TEST_PATH, mt_enum.LINK_INVENT + rm_enum.TRANSFER_LEARNING) if not os.path.isdir(self.workfolder): os.makedirs(self.workfolder) self.log_dir =", "= LinkInventLearningRateConfiguration() self.parameters = LinkInventTransferLearningConfiguration(empty_model=LINK_INVENT_PRIOR_PATH, output_path=self.workfolder, input_smiles_path=SMILES_SET_LINK_INVENT_PATH, validation_smiles_path=None, num_epochs=2, sample_size=10, learning_rate=self.lr_config) self.general_config =", "running_modes.constructors.transfer_learning_mode_constructor import TransferLearningModeConstructor from running_modes.utils import set_default_device_cuda from running_modes.enums.logging_mode_enum import LoggingModeEnum from running_modes.enums.running_mode_enum", "set_default_device_cuda() lm_enum = LoggingModeEnum() rm_enum = RunningModeEnum() mt_enum = ModelTypeEnum() self.workfolder = os.path.join(MAIN_TEST_PATH,", "= TransferLearningLoggerConfig(logging_path=self.log_dir, recipient=lm_enum.LOCAL, job_name=\"test_job\") self.lr_config = LinkInventLearningRateConfiguration() self.parameters = LinkInventTransferLearningConfiguration(empty_model=LINK_INVENT_PRIOR_PATH, output_path=self.workfolder, input_smiles_path=SMILES_SET_LINK_INVENT_PATH, validation_smiles_path=None,", "import RunningModeEnum from reinvent_models.model_factory.enums.model_type_enum import ModelTypeEnum from unittest_reinvent.fixtures.paths import MAIN_TEST_PATH, SMILES_SET_LINK_INVENT_PATH, LINK_INVENT_PRIOR_PATH from", "if os.path.isdir(self.workfolder): shutil.rmtree(self.workfolder) def _model_saved_and_logs_exist(self): self.assertTrue(os.path.isfile(os.path.join(self.workfolder, self.parameters.model_file_name))) 
self.assertTrue(os.path.isdir(self.log_dir)) self.assertEqual(count_empty_files(self.log_dir), 0) def test_no_validation(self): self.parameters.validation_smiles_path", "\\ LinkInventLearningRateConfiguration from running_modes.configurations.transfer_learning.link_invent_transfer_learning_configuration import \\ LinkInventTransferLearningConfiguration from running_modes.constructors.transfer_learning_mode_constructor import TransferLearningModeConstructor from running_modes.utils", "os.path.isdir(self.workfolder): shutil.rmtree(self.workfolder) def _model_saved_and_logs_exist(self): self.assertTrue(os.path.isfile(os.path.join(self.workfolder, self.parameters.model_file_name))) self.assertTrue(os.path.isdir(self.log_dir)) self.assertEqual(count_empty_files(self.log_dir), 0) def test_no_validation(self): self.parameters.validation_smiles_path =", "mt_enum = ModelTypeEnum() self.workfolder = os.path.join(MAIN_TEST_PATH, mt_enum.LINK_INVENT + rm_enum.TRANSFER_LEARNING) if not os.path.isdir(self.workfolder): os.makedirs(self.workfolder)", "self.runner = TransferLearningModeConstructor(self.general_config) def tearDown(self): if os.path.isdir(self.workfolder): shutil.rmtree(self.workfolder) def _model_saved_and_logs_exist(self): self.assertTrue(os.path.isfile(os.path.join(self.workfolder, self.parameters.model_file_name))) self.assertTrue(os.path.isdir(self.log_dir))", "log_config = TransferLearningLoggerConfig(logging_path=self.log_dir, recipient=lm_enum.LOCAL, job_name=\"test_job\") self.lr_config = LinkInventLearningRateConfiguration() self.parameters = LinkInventTransferLearningConfiguration(empty_model=LINK_INVENT_PRIOR_PATH, output_path=self.workfolder, input_smiles_path=SMILES_SET_LINK_INVENT_PATH,", "run_type=rm_enum.TRANSFER_LEARNING, version=\"3.0\", parameters=vars(self.parameters)) self.runner = TransferLearningModeConstructor(self.general_config) def tearDown(self): if os.path.isdir(self.workfolder): shutil.rmtree(self.workfolder) def _model_saved_and_logs_exist(self):", "recipient=lm_enum.LOCAL, job_name=\"test_job\") self.lr_config = LinkInventLearningRateConfiguration() self.parameters = LinkInventTransferLearningConfiguration(empty_model=LINK_INVENT_PRIOR_PATH, output_path=self.workfolder, input_smiles_path=SMILES_SET_LINK_INVENT_PATH, validation_smiles_path=None, num_epochs=2, sample_size=10,", "self.parameters = LinkInventTransferLearningConfiguration(empty_model=LINK_INVENT_PRIOR_PATH, output_path=self.workfolder, input_smiles_path=SMILES_SET_LINK_INVENT_PATH, validation_smiles_path=None, num_epochs=2, sample_size=10, learning_rate=self.lr_config) self.general_config = GeneralConfigurationEnvelope(model_type=mt_enum.LINK_INVENT, logging=vars(log_config),", "logging=vars(log_config), run_type=rm_enum.TRANSFER_LEARNING, version=\"3.0\", parameters=vars(self.parameters)) self.runner = TransferLearningModeConstructor(self.general_config) def tearDown(self): if os.path.isdir(self.workfolder): shutil.rmtree(self.workfolder) def", "os from running_modes.configurations import TransferLearningLoggerConfig, GeneralConfigurationEnvelope from running_modes.configurations.transfer_learning.link_invent_learning_rate_configuration import \\ LinkInventLearningRateConfiguration from running_modes.configurations.transfer_learning.link_invent_transfer_learning_configuration", "LoggingModeEnum from running_modes.enums.running_mode_enum import RunningModeEnum from reinvent_models.model_factory.enums.model_type_enum import ModelTypeEnum from unittest_reinvent.fixtures.paths import 
MAIN_TEST_PATH,", "import \\ LinkInventTransferLearningConfiguration from running_modes.constructors.transfer_learning_mode_constructor import TransferLearningModeConstructor from running_modes.utils import set_default_device_cuda from running_modes.enums.logging_mode_enum", "os.path.join(self.workfolder, \"test_log\") log_config = TransferLearningLoggerConfig(logging_path=self.log_dir, recipient=lm_enum.LOCAL, job_name=\"test_job\") self.lr_config = LinkInventLearningRateConfiguration() self.parameters = LinkInventTransferLearningConfiguration(empty_model=LINK_INVENT_PRIOR_PATH,", "unittest import os from running_modes.configurations import TransferLearningLoggerConfig, GeneralConfigurationEnvelope from running_modes.configurations.transfer_learning.link_invent_learning_rate_configuration import \\ LinkInventLearningRateConfiguration", "import LoggingModeEnum from running_modes.enums.running_mode_enum import RunningModeEnum from reinvent_models.model_factory.enums.model_type_enum import ModelTypeEnum from unittest_reinvent.fixtures.paths import", "self.workfolder = os.path.join(MAIN_TEST_PATH, mt_enum.LINK_INVENT + rm_enum.TRANSFER_LEARNING) if not os.path.isdir(self.workfolder): os.makedirs(self.workfolder) self.log_dir = os.path.join(self.workfolder,", "running_modes.configurations import TransferLearningLoggerConfig, GeneralConfigurationEnvelope from running_modes.configurations.transfer_learning.link_invent_learning_rate_configuration import \\ LinkInventLearningRateConfiguration from running_modes.configurations.transfer_learning.link_invent_transfer_learning_configuration import \\", "def test_no_validation(self): self.parameters.validation_smiles_path = None self.runner.run() self._model_saved_and_logs_exist() def test_with_validation(self): self.parameters.validation_smiles_path = SMILES_SET_LINK_INVENT_PATH self.runner.run()", "import TransferLearningLoggerConfig, GeneralConfigurationEnvelope from running_modes.configurations.transfer_learning.link_invent_learning_rate_configuration import \\ LinkInventLearningRateConfiguration from running_modes.configurations.transfer_learning.link_invent_transfer_learning_configuration import \\ LinkInventTransferLearningConfiguration", "from unittest_reinvent.fixtures.paths import MAIN_TEST_PATH, SMILES_SET_LINK_INVENT_PATH, LINK_INVENT_PRIOR_PATH from unittest_reinvent.fixtures.utils import count_empty_files class TestLinkInventTransferLearning(unittest.TestCase): def", "\"test_log\") log_config = TransferLearningLoggerConfig(logging_path=self.log_dir, recipient=lm_enum.LOCAL, job_name=\"test_job\") self.lr_config = LinkInventLearningRateConfiguration() self.parameters = LinkInventTransferLearningConfiguration(empty_model=LINK_INVENT_PRIOR_PATH, output_path=self.workfolder,", "job_name=\"test_job\") self.lr_config = LinkInventLearningRateConfiguration() self.parameters = LinkInventTransferLearningConfiguration(empty_model=LINK_INVENT_PRIOR_PATH, output_path=self.workfolder, input_smiles_path=SMILES_SET_LINK_INVENT_PATH, validation_smiles_path=None, num_epochs=2, sample_size=10, learning_rate=self.lr_config)", "running_modes.configurations.transfer_learning.link_invent_learning_rate_configuration import \\ LinkInventLearningRateConfiguration from running_modes.configurations.transfer_learning.link_invent_transfer_learning_configuration import \\ LinkInventTransferLearningConfiguration from running_modes.constructors.transfer_learning_mode_constructor import TransferLearningModeConstructor", "from 
import shutil
import unittest
import os

from running_modes.configurations import TransferLearningLoggerConfig, GeneralConfigurationEnvelope
from running_modes.configurations.transfer_learning.link_invent_learning_rate_configuration import \
    LinkInventLearningRateConfiguration
from running_modes.configurations.transfer_learning.link_invent_transfer_learning_configuration import \
    LinkInventTransferLearningConfiguration
from running_modes.constructors.transfer_learning_mode_constructor import TransferLearningModeConstructor
from running_modes.utils import set_default_device_cuda
from running_modes.enums.logging_mode_enum import LoggingModeEnum
from running_modes.enums.running_mode_enum import RunningModeEnum
from reinvent_models.model_factory.enums.model_type_enum import ModelTypeEnum
from unittest_reinvent.fixtures.paths import MAIN_TEST_PATH, SMILES_SET_LINK_INVENT_PATH, LINK_INVENT_PRIOR_PATH
from unittest_reinvent.fixtures.utils import count_empty_files


class TestLinkInventTransferLearning(unittest.TestCase):

    def setUp(self):
        set_default_device_cuda()
        lm_enum = LoggingModeEnum()
        rm_enum = RunningModeEnum()
        mt_enum = ModelTypeEnum()
        # the os.path.join() call below is inferred from the surrounding fragments
        self.workfolder = os.path.join(MAIN_TEST_PATH, rm_enum.TRANSFER_LEARNING)
        if not os.path.isdir(self.workfolder):
            os.makedirs(self.workfolder)
        self.log_dir = os.path.join(self.workfolder, "test_log")
        log_config = TransferLearningLoggerConfig(logging_path=self.log_dir, recipient=lm_enum.LOCAL,
                                                  job_name="test_job")
        self.lr_config = LinkInventLearningRateConfiguration()
        self.parameters = LinkInventTransferLearningConfiguration(empty_model=LINK_INVENT_PRIOR_PATH,
                                                                  output_path=self.workfolder,
                                                                  input_smiles_path=SMILES_SET_LINK_INVENT_PATH,
                                                                  validation_smiles_path=None,
                                                                  num_epochs=2,
                                                                  sample_size=10,
                                                                  learning_rate=self.lr_config)
        self.general_config = GeneralConfigurationEnvelope(model_type=mt_enum.LINK_INVENT,
                                                           logging=vars(log_config),
                                                           run_type=rm_enum.TRANSFER_LEARNING,
                                                           version="3.0",
                                                           parameters=vars(self.parameters))
        self.runner = TransferLearningModeConstructor(self.general_config)

    def tearDown(self):
        if os.path.isdir(self.workfolder):
            shutil.rmtree(self.workfolder)

    def _model_saved_and_logs_exist(self):
        self.assertTrue(os.path.isfile(os.path.join(self.workfolder, self.parameters.model_file_name)))
        self.assertTrue(os.path.isdir(self.log_dir))
        self.assertEqual(count_empty_files(self.log_dir), 0)

    def test_no_validation(self):
        self.parameters.validation_smiles_path = None
        self.runner.run()
        self._model_saved_and_logs_exist()

    def test_with_validation(self):
        self.parameters.validation_smiles_path = SMILES_SET_LINK_INVENT_PATH
        # remainder of this test is truncated in the source
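A minimal sketch of how this test case can be run on its own, assuming the REINVENT environment is available (CUDA device, the Link-INVENT prior file and SMILES fixtures); the module name test_link_invent_transfer_learning is hypothetical and should be replaced with wherever the file actually lives in the repository:

import unittest

# Hypothetical module path for the test case above; adjust to the real location.
from test_link_invent_transfer_learning import TestLinkInventTransferLearning

if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(TestLinkInventTransferLearning)
    unittest.TextTestRunner(verbosity=2).run(suite)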
[ "convert loaded images to greyscale (1 channel). If 1, always convert loaded images", "{0, 1}, default 1 If 0, always convert loaded images to greyscale (1", "filename in sorted(os.listdir(path)): filename = os.path.join(path, filename) ext = os.path.splitext(filename)[1] if ext.lower() not", "root/bus/123.jpg root/bus/023.jpg root/bus/wwww.jpg Parameters ---------- root : str Path to root directory. flag", "def __getitem__(self, idx): img = image.imread(self.items[idx][0], self._flag) label = self.items[idx][1] if self._transform is", "stacklevel=3) continue label = len(self.synsets) self.synsets.append(folder) for filename in sorted(os.listdir(path)): filename = os.path.join(path,", "__init__(self, indices_selected): if isinstance(indices_selected, list): indices_selected = np.array(indices_selected) self._indices_selected = indices_selected self._length =", "flag : {0, 1}, default 1 If 0, always convert loaded images to", "self.items[idx][1] if self._transform is not None: return self._transform(img, label) if self._pseudo_labels is not", "which is not a directory.'%path, stacklevel=3) continue label = len(self.synsets) self.synsets.append(folder) for filename", "transform = lambda data, label: (data.astype(np.float32)/255, label) Attributes ---------- synsets : list List", "filename, ext, ', '.join(self._exts))) continue self.items.append((filename, label)) def __getitem__(self, idx): img = image.imread(self.items[idx][0],", "greyscale (1 channel). If 1, always convert loaded images to colored (3 channels).", ": 2020/2/12 15:47 # @Author : Chen # @File : datasets.py # @Software:", "root/car/0001.jpg root/car/xxxa.jpg root/car/yyyb.jpg root/bus/123.jpg root/bus/023.jpg root/bus/wwww.jpg Parameters ---------- root : str Path to", "Only support %s'%( filename, ext, ', '.join(self._exts))) continue self.items.append((filename, label)) def __getitem__(self, idx):", "root, flag=1, transform=None, pseudo_labels=None): self._root = os.path.expanduser(root) self._flag = flag self._transform = transform", "channel). If 1, always convert loaded images to colored (3 channels). transform :", "ImageFolderDataset(dataset.Dataset): \"\"\"A dataset for loading image files stored in a folder structure. like::", "2020/2/12 15:47 # @Author : Chen # @File : datasets.py # @Software: PyCharm", "not in self._exts: warnings.warn('Ignoring %s of type %s. Only support %s'%( filename, ext,", "self._transform = transform self._exts = ['.jpg', '.jpeg', '.png'] self._list_images(self._root) self._pseudo_labels = pseudo_labels def", "filename) ext = os.path.splitext(filename)[1] if ext.lower() not in self._exts: warnings.warn('Ignoring %s of type", "is the name for the integer label `i` items : list of tuples", "if not os.path.isdir(path): warnings.warn('Ignoring %s, which is not a directory.'%path, stacklevel=3) continue label", "flag self._transform = transform self._exts = ['.jpg', '.jpeg', '.png'] self._list_images(self._root) self._pseudo_labels = pseudo_labels", "root : str Path to root directory. flag : {0, 1}, default 1", "= lambda data, label: (data.astype(np.float32)/255, label) Attributes ---------- synsets : list List of", "a directory.'%path, stacklevel=3) continue label = len(self.synsets) self.synsets.append(folder) for filename in sorted(os.listdir(path)): filename", "str Path to root directory. flag : {0, 1}, default 1 If 0,", "default 1 If 0, always convert loaded images to greyscale (1 channel). 
If", "idx): img = image.imread(self.items[idx][0], self._flag) label = self.items[idx][1] if self._transform is not None:", "images to colored (3 channels). transform : callable, default None A function that", "indices_selected = np.array(indices_selected) self._indices_selected = indices_selected self._length = indices_selected.shape[0] def __iter__(self): indices =", "`synsets[i]` is the name for the integer label `i` items : list of", "convert loaded images to colored (3 channels). transform : callable, default None A", "root/car/yyyb.jpg root/bus/123.jpg root/bus/023.jpg root/bus/wwww.jpg Parameters ---------- root : str Path to root directory.", "(filename, label) pairs. \"\"\" def __init__(self, root, flag=1, transform=None, pseudo_labels=None): self._root = os.path.expanduser(root)", "to root directory. flag : {0, 1}, default 1 If 0, always convert", "= [] for folder in sorted(os.listdir(root)): path = os.path.join(root, folder) if not os.path.isdir(path):", "import numpy as np class IdxSampler(sampler.Sampler): \"\"\"Samples elements from [0, length) randomly without", "not None: return self._transform(img, label) if self._pseudo_labels is not None: pseudo_label = self._pseudo_labels[idx]", "numpy as np class IdxSampler(sampler.Sampler): \"\"\"Samples elements from [0, length) randomly without replacement.", ": {0, 1}, default 1 If 0, always convert loaded images to greyscale", "img = image.imread(self.items[idx][0], self._flag) label = self.items[idx][1] if self._transform is not None: return", "root/bus/023.jpg root/bus/wwww.jpg Parameters ---------- root : str Path to root directory. flag :", "= len(self.synsets) self.synsets.append(folder) for filename in sorted(os.listdir(path)): filename = os.path.join(path, filename) ext =", "colored (3 channels). transform : callable, default None A function that takes data", "pairs. \"\"\" def __init__(self, root, flag=1, transform=None, pseudo_labels=None): self._root = os.path.expanduser(root) self._flag =", "self._list_images(self._root) self._pseudo_labels = pseudo_labels def _list_images(self, root): self.synsets = [] self.items = []", "__len__(self): return self._length class ImageFolderDataset(dataset.Dataset): \"\"\"A dataset for loading image files stored in", "transform=None, pseudo_labels=None): self._root = os.path.expanduser(root) self._flag = flag self._transform = transform self._exts =", "%s'%( filename, ext, ', '.join(self._exts))) continue self.items.append((filename, label)) def __getitem__(self, idx): img =", "= os.path.join(root, folder) if not os.path.isdir(path): warnings.warn('Ignoring %s, which is not a directory.'%path,", "of the sequence. \"\"\" def __init__(self, indices_selected): if isinstance(indices_selected, list): indices_selected = np.array(indices_selected)", "label = len(self.synsets) self.synsets.append(folder) for filename in sorted(os.listdir(path)): filename = os.path.join(path, filename) ext", "@Author : Chen # @File : datasets.py # @Software: PyCharm import os, warnings", "= transform self._exts = ['.jpg', '.jpeg', '.png'] self._list_images(self._root) self._pseudo_labels = pseudo_labels def _list_images(self,", "def __init__(self, indices_selected): if isinstance(indices_selected, list): indices_selected = np.array(indices_selected) self._indices_selected = indices_selected self._length", "of class names. 
`synsets[i]` is the name for the integer label `i` items", "them:: transform = lambda data, label: (data.astype(np.float32)/255, label) Attributes ---------- synsets : list", "Length of the sequence. \"\"\" def __init__(self, indices_selected): if isinstance(indices_selected, list): indices_selected =", "-*- coding: utf-8 -*- # @Time : 2020/2/12 15:47 # @Author : Chen", "list of tuples List of all images in (filename, label) pairs. \"\"\" def", "name for the integer label `i` items : list of tuples List of", "warnings.warn('Ignoring %s, which is not a directory.'%path, stacklevel=3) continue label = len(self.synsets) self.synsets.append(folder)", "self._exts: warnings.warn('Ignoring %s of type %s. Only support %s'%( filename, ext, ', '.join(self._exts)))", "sorted(os.listdir(path)): filename = os.path.join(path, filename) ext = os.path.splitext(filename)[1] if ext.lower() not in self._exts:", "%s. Only support %s'%( filename, ext, ', '.join(self._exts))) continue self.items.append((filename, label)) def __getitem__(self,", "integer label `i` items : list of tuples List of all images in", "images in (filename, label) pairs. \"\"\" def __init__(self, root, flag=1, transform=None, pseudo_labels=None): self._root", "elements from [0, length) randomly without replacement. Parameters ---------- length : int Length", "`i` items : list of tuples List of all images in (filename, label)", "of all images in (filename, label) pairs. \"\"\" def __init__(self, root, flag=1, transform=None,", "self._pseudo_labels is not None: pseudo_label = self._pseudo_labels[idx] return img, label, idx, pseudo_label return", "'.jpeg', '.png'] self._list_images(self._root) self._pseudo_labels = pseudo_labels def _list_images(self, root): self.synsets = [] self.items", "all images in (filename, label) pairs. \"\"\" def __init__(self, root, flag=1, transform=None, pseudo_labels=None):", "np.array(indices_selected) self._indices_selected = indices_selected self._length = indices_selected.shape[0] def __iter__(self): indices = self._indices_selected np.random.shuffle(indices)", "not os.path.isdir(path): warnings.warn('Ignoring %s, which is not a directory.'%path, stacklevel=3) continue label =", "import dataset, sampler from mxnet import image import numpy as np class IdxSampler(sampler.Sampler):", "root/bus/wwww.jpg Parameters ---------- root : str Path to root directory. flag : {0,", "from [0, length) randomly without replacement. Parameters ---------- length : int Length of", "continue self.items.append((filename, label)) def __getitem__(self, idx): img = image.imread(self.items[idx][0], self._flag) label = self.items[idx][1]", "ext = os.path.splitext(filename)[1] if ext.lower() not in self._exts: warnings.warn('Ignoring %s of type %s.", "type %s. Only support %s'%( filename, ext, ', '.join(self._exts))) continue self.items.append((filename, label)) def", "pseudo_labels def _list_images(self, root): self.synsets = [] self.items = [] for folder in", "to colored (3 channels). 
transform : callable, default None A function that takes", "mxnet.gluon.data import dataset, sampler from mxnet import image import numpy as np class", "None A function that takes data and label and transforms them:: transform =", "= ['.jpg', '.jpeg', '.png'] self._list_images(self._root) self._pseudo_labels = pseudo_labels def _list_images(self, root): self.synsets =", "in sorted(os.listdir(path)): filename = os.path.join(path, filename) ext = os.path.splitext(filename)[1] if ext.lower() not in", "warnings from mxnet.gluon.data import dataset, sampler from mxnet import image import numpy as", "image import numpy as np class IdxSampler(sampler.Sampler): \"\"\"Samples elements from [0, length) randomly", "return iter(indices) def __len__(self): return self._length class ImageFolderDataset(dataset.Dataset): \"\"\"A dataset for loading image", "self._indices_selected np.random.shuffle(indices) return iter(indices) def __len__(self): return self._length class ImageFolderDataset(dataset.Dataset): \"\"\"A dataset for", "support %s'%( filename, ext, ', '.join(self._exts))) continue self.items.append((filename, label)) def __getitem__(self, idx): img", "names. `synsets[i]` is the name for the integer label `i` items : list", "structure. like:: root/car/0001.jpg root/car/xxxa.jpg root/car/yyyb.jpg root/bus/123.jpg root/bus/023.jpg root/bus/wwww.jpg Parameters ---------- root : str", "in (filename, label) pairs. \"\"\" def __init__(self, root, flag=1, transform=None, pseudo_labels=None): self._root =", "from mxnet.gluon.data import dataset, sampler from mxnet import image import numpy as np", "sampler from mxnet import image import numpy as np class IdxSampler(sampler.Sampler): \"\"\"Samples elements", "class names. `synsets[i]` is the name for the integer label `i` items :", "Attributes ---------- synsets : list List of class names. `synsets[i]` is the name", "like:: root/car/0001.jpg root/car/xxxa.jpg root/car/yyyb.jpg root/bus/123.jpg root/bus/023.jpg root/bus/wwww.jpg Parameters ---------- root : str Path", "randomly without replacement. Parameters ---------- length : int Length of the sequence. \"\"\"", "root): self.synsets = [] self.items = [] for folder in sorted(os.listdir(root)): path =", "# @Software: PyCharm import os, warnings from mxnet.gluon.data import dataset, sampler from mxnet", "items : list of tuples List of all images in (filename, label) pairs.", "label `i` items : list of tuples List of all images in (filename,", "without replacement. Parameters ---------- length : int Length of the sequence. \"\"\" def", "transforms them:: transform = lambda data, label: (data.astype(np.float32)/255, label) Attributes ---------- synsets :", "IdxSampler(sampler.Sampler): \"\"\"Samples elements from [0, length) randomly without replacement. Parameters ---------- length :", "for the integer label `i` items : list of tuples List of all", "stored in a folder structure. like:: root/car/0001.jpg root/car/xxxa.jpg root/car/yyyb.jpg root/bus/123.jpg root/bus/023.jpg root/bus/wwww.jpg Parameters", "continue label = len(self.synsets) self.synsets.append(folder) for filename in sorted(os.listdir(path)): filename = os.path.join(path, filename)", "None: return self._transform(img, label) if self._pseudo_labels is not None: pseudo_label = self._pseudo_labels[idx] return", "tuples List of all images in (filename, label) pairs. \"\"\" def __init__(self, root,", ": str Path to root directory. 
flag : {0, 1}, default 1 If", "lambda data, label: (data.astype(np.float32)/255, label) Attributes ---------- synsets : list List of class", "root/car/xxxa.jpg root/car/yyyb.jpg root/bus/123.jpg root/bus/023.jpg root/bus/wwww.jpg Parameters ---------- root : str Path to root", "transform : callable, default None A function that takes data and label and", "for loading image files stored in a folder structure. like:: root/car/0001.jpg root/car/xxxa.jpg root/car/yyyb.jpg", "folder structure. like:: root/car/0001.jpg root/car/xxxa.jpg root/car/yyyb.jpg root/bus/123.jpg root/bus/023.jpg root/bus/wwww.jpg Parameters ---------- root :", "and label and transforms them:: transform = lambda data, label: (data.astype(np.float32)/255, label) Attributes", "Path to root directory. flag : {0, 1}, default 1 If 0, always", "from mxnet import image import numpy as np class IdxSampler(sampler.Sampler): \"\"\"Samples elements from", "images to greyscale (1 channel). If 1, always convert loaded images to colored", ": Chen # @File : datasets.py # @Software: PyCharm import os, warnings from", "data, label: (data.astype(np.float32)/255, label) Attributes ---------- synsets : list List of class names.", "int Length of the sequence. \"\"\" def __init__(self, indices_selected): if isinstance(indices_selected, list): indices_selected", "and transforms them:: transform = lambda data, label: (data.astype(np.float32)/255, label) Attributes ---------- synsets", "transform self._exts = ['.jpg', '.jpeg', '.png'] self._list_images(self._root) self._pseudo_labels = pseudo_labels def _list_images(self, root):", "self._transform is not None: return self._transform(img, label) if self._pseudo_labels is not None: pseudo_label", "loading image files stored in a folder structure. like:: root/car/0001.jpg root/car/xxxa.jpg root/car/yyyb.jpg root/bus/123.jpg", "utf-8 -*- # @Time : 2020/2/12 15:47 # @Author : Chen # @File", "[] for folder in sorted(os.listdir(root)): path = os.path.join(root, folder) if not os.path.isdir(path): warnings.warn('Ignoring", "warnings.warn('Ignoring %s of type %s. Only support %s'%( filename, ext, ', '.join(self._exts))) continue", "1, always convert loaded images to colored (3 channels). transform : callable, default", "import image import numpy as np class IdxSampler(sampler.Sampler): \"\"\"Samples elements from [0, length)", "__getitem__(self, idx): img = image.imread(self.items[idx][0], self._flag) label = self.items[idx][1] if self._transform is not", "the sequence. \"\"\" def __init__(self, indices_selected): if isinstance(indices_selected, list): indices_selected = np.array(indices_selected) self._indices_selected", "indices = self._indices_selected np.random.shuffle(indices) return iter(indices) def __len__(self): return self._length class ImageFolderDataset(dataset.Dataset): \"\"\"A", "= os.path.join(path, filename) ext = os.path.splitext(filename)[1] if ext.lower() not in self._exts: warnings.warn('Ignoring %s", "class ImageFolderDataset(dataset.Dataset): \"\"\"A dataset for loading image files stored in a folder structure.", "\"\"\"A dataset for loading image files stored in a folder structure. like:: root/car/0001.jpg", "list List of class names. `synsets[i]` is the name for the integer label", "in a folder structure. 
like:: root/car/0001.jpg root/car/xxxa.jpg root/car/yyyb.jpg root/bus/123.jpg root/bus/023.jpg root/bus/wwww.jpg Parameters ----------", "os.path.join(path, filename) ext = os.path.splitext(filename)[1] if ext.lower() not in self._exts: warnings.warn('Ignoring %s of", "not None: pseudo_label = self._pseudo_labels[idx] return img, label, idx, pseudo_label return img, label,", "callable, default None A function that takes data and label and transforms them::", "sorted(os.listdir(root)): path = os.path.join(root, folder) if not os.path.isdir(path): warnings.warn('Ignoring %s, which is not", "---------- length : int Length of the sequence. \"\"\" def __init__(self, indices_selected): if", "filename = os.path.join(path, filename) ext = os.path.splitext(filename)[1] if ext.lower() not in self._exts: warnings.warn('Ignoring", "datasets.py # @Software: PyCharm import os, warnings from mxnet.gluon.data import dataset, sampler from", "self._length class ImageFolderDataset(dataset.Dataset): \"\"\"A dataset for loading image files stored in a folder", "self._indices_selected = indices_selected self._length = indices_selected.shape[0] def __iter__(self): indices = self._indices_selected np.random.shuffle(indices) return", "# -*- coding: utf-8 -*- # @Time : 2020/2/12 15:47 # @Author :", "= np.array(indices_selected) self._indices_selected = indices_selected self._length = indices_selected.shape[0] def __iter__(self): indices = self._indices_selected", "\"\"\"Samples elements from [0, length) randomly without replacement. Parameters ---------- length : int", "the integer label `i` items : list of tuples List of all images", "If 1, always convert loaded images to colored (3 channels). transform : callable,", "= image.imread(self.items[idx][0], self._flag) label = self.items[idx][1] if self._transform is not None: return self._transform(img,", "%s, which is not a directory.'%path, stacklevel=3) continue label = len(self.synsets) self.synsets.append(folder) for", "self._flag) label = self.items[idx][1] if self._transform is not None: return self._transform(img, label) if", "self.items.append((filename, label)) def __getitem__(self, idx): img = image.imread(self.items[idx][0], self._flag) label = self.items[idx][1] if", "os.path.expanduser(root) self._flag = flag self._transform = transform self._exts = ['.jpg', '.jpeg', '.png'] self._list_images(self._root)", "label)) def __getitem__(self, idx): img = image.imread(self.items[idx][0], self._flag) label = self.items[idx][1] if self._transform", "flag=1, transform=None, pseudo_labels=None): self._root = os.path.expanduser(root) self._flag = flag self._transform = transform self._exts", "always convert loaded images to greyscale (1 channel). If 1, always convert loaded", "directory. flag : {0, 1}, default 1 If 0, always convert loaded images", ": list of tuples List of all images in (filename, label) pairs. \"\"\"", "= self._indices_selected np.random.shuffle(indices) return iter(indices) def __len__(self): return self._length class ImageFolderDataset(dataset.Dataset): \"\"\"A dataset", "['.jpg', '.jpeg', '.png'] self._list_images(self._root) self._pseudo_labels = pseudo_labels def _list_images(self, root): self.synsets = []", "1 If 0, always convert loaded images to greyscale (1 channel). If 1,", "mxnet import image import numpy as np class IdxSampler(sampler.Sampler): \"\"\"Samples elements from [0,", "path = os.path.join(root, folder) if not os.path.isdir(path): warnings.warn('Ignoring %s, which is not a", "loaded images to greyscale (1 channel). 
If 1, always convert loaded images to", "data and label and transforms them:: transform = lambda data, label: (data.astype(np.float32)/255, label)", "for filename in sorted(os.listdir(path)): filename = os.path.join(path, filename) ext = os.path.splitext(filename)[1] if ext.lower()", "(data.astype(np.float32)/255, label) Attributes ---------- synsets : list List of class names. `synsets[i]` is", "image.imread(self.items[idx][0], self._flag) label = self.items[idx][1] if self._transform is not None: return self._transform(img, label)", "= indices_selected self._length = indices_selected.shape[0] def __iter__(self): indices = self._indices_selected np.random.shuffle(indices) return iter(indices)", "in self._exts: warnings.warn('Ignoring %s of type %s. Only support %s'%( filename, ext, ',", "dataset, sampler from mxnet import image import numpy as np class IdxSampler(sampler.Sampler): \"\"\"Samples", "to greyscale (1 channel). If 1, always convert loaded images to colored (3", "[] self.items = [] for folder in sorted(os.listdir(root)): path = os.path.join(root, folder) if", "__init__(self, root, flag=1, transform=None, pseudo_labels=None): self._root = os.path.expanduser(root) self._flag = flag self._transform =", "ext, ', '.join(self._exts))) continue self.items.append((filename, label)) def __getitem__(self, idx): img = image.imread(self.items[idx][0], self._flag)", "length) randomly without replacement. Parameters ---------- length : int Length of the sequence.", "label and transforms them:: transform = lambda data, label: (data.astype(np.float32)/255, label) Attributes ----------", "self._transform(img, label) if self._pseudo_labels is not None: pseudo_label = self._pseudo_labels[idx] return img, label,", "%s of type %s. Only support %s'%( filename, ext, ', '.join(self._exts))) continue self.items.append((filename,", "channels). transform : callable, default None A function that takes data and label", "None: pseudo_label = self._pseudo_labels[idx] return img, label, idx, pseudo_label return img, label, idx", "self._length = indices_selected.shape[0] def __iter__(self): indices = self._indices_selected np.random.shuffle(indices) return iter(indices) def __len__(self):", "_list_images(self, root): self.synsets = [] self.items = [] for folder in sorted(os.listdir(root)): path", "os.path.splitext(filename)[1] if ext.lower() not in self._exts: warnings.warn('Ignoring %s of type %s. Only support", "sequence. \"\"\" def __init__(self, indices_selected): if isinstance(indices_selected, list): indices_selected = np.array(indices_selected) self._indices_selected =", "default None A function that takes data and label and transforms them:: transform", "15:47 # @Author : Chen # @File : datasets.py # @Software: PyCharm import", "isinstance(indices_selected, list): indices_selected = np.array(indices_selected) self._indices_selected = indices_selected self._length = indices_selected.shape[0] def __iter__(self):", "\"\"\" def __init__(self, root, flag=1, transform=None, pseudo_labels=None): self._root = os.path.expanduser(root) self._flag = flag", "as np class IdxSampler(sampler.Sampler): \"\"\"Samples elements from [0, length) randomly without replacement. Parameters", "self.synsets = [] self.items = [] for folder in sorted(os.listdir(root)): path = os.path.join(root,", "label) Attributes ---------- synsets : list List of class names. 
`synsets[i]` is the", "@File : datasets.py # @Software: PyCharm import os, warnings from mxnet.gluon.data import dataset,", "list): indices_selected = np.array(indices_selected) self._indices_selected = indices_selected self._length = indices_selected.shape[0] def __iter__(self): indices", "that takes data and label and transforms them:: transform = lambda data, label:", "self._root = os.path.expanduser(root) self._flag = flag self._transform = transform self._exts = ['.jpg', '.jpeg',", "def __len__(self): return self._length class ImageFolderDataset(dataset.Dataset): \"\"\"A dataset for loading image files stored", "label = self.items[idx][1] if self._transform is not None: return self._transform(img, label) if self._pseudo_labels", "If 0, always convert loaded images to greyscale (1 channel). If 1, always", "= indices_selected.shape[0] def __iter__(self): indices = self._indices_selected np.random.shuffle(indices) return iter(indices) def __len__(self): return", "import os, warnings from mxnet.gluon.data import dataset, sampler from mxnet import image import", "label: (data.astype(np.float32)/255, label) Attributes ---------- synsets : list List of class names. `synsets[i]`", "is not None: return self._transform(img, label) if self._pseudo_labels is not None: pseudo_label =", "if self._transform is not None: return self._transform(img, label) if self._pseudo_labels is not None:", ": int Length of the sequence. \"\"\" def __init__(self, indices_selected): if isinstance(indices_selected, list):", "---------- synsets : list List of class names. `synsets[i]` is the name for", "length : int Length of the sequence. \"\"\" def __init__(self, indices_selected): if isinstance(indices_selected,", ": list List of class names. `synsets[i]` is the name for the integer", "of type %s. Only support %s'%( filename, ext, ', '.join(self._exts))) continue self.items.append((filename, label))", "@Software: PyCharm import os, warnings from mxnet.gluon.data import dataset, sampler from mxnet import", "self.synsets.append(folder) for filename in sorted(os.listdir(path)): filename = os.path.join(path, filename) ext = os.path.splitext(filename)[1] if", "function that takes data and label and transforms them:: transform = lambda data,", "is not None: pseudo_label = self._pseudo_labels[idx] return img, label, idx, pseudo_label return img,", "pseudo_labels=None): self._root = os.path.expanduser(root) self._flag = flag self._transform = transform self._exts = ['.jpg',", "= flag self._transform = transform self._exts = ['.jpg', '.jpeg', '.png'] self._list_images(self._root) self._pseudo_labels =", "synsets : list List of class names. `synsets[i]` is the name for the", "indices_selected): if isinstance(indices_selected, list): indices_selected = np.array(indices_selected) self._indices_selected = indices_selected self._length = indices_selected.shape[0]", "return self._length class ImageFolderDataset(dataset.Dataset): \"\"\"A dataset for loading image files stored in a", "@Time : 2020/2/12 15:47 # @Author : Chen # @File : datasets.py #", "of tuples List of all images in (filename, label) pairs. \"\"\" def __init__(self,", ": callable, default None A function that takes data and label and transforms", "self._flag = flag self._transform = transform self._exts = ['.jpg', '.jpeg', '.png'] self._list_images(self._root) self._pseudo_labels", "root directory. 
flag : {0, 1}, default 1 If 0, always convert loaded", "# @Author : Chen # @File : datasets.py # @Software: PyCharm import os,", "os.path.join(root, folder) if not os.path.isdir(path): warnings.warn('Ignoring %s, which is not a directory.'%path, stacklevel=3)", "the name for the integer label `i` items : list of tuples List", "= pseudo_labels def _list_images(self, root): self.synsets = [] self.items = [] for folder", "', '.join(self._exts))) continue self.items.append((filename, label)) def __getitem__(self, idx): img = image.imread(self.items[idx][0], self._flag) label", "return img, label, idx, pseudo_label return img, label, idx def __len__(self): return len(self.items)", "'.png'] self._list_images(self._root) self._pseudo_labels = pseudo_labels def _list_images(self, root): self.synsets = [] self.items =", "---------- root : str Path to root directory. flag : {0, 1}, default", "__iter__(self): indices = self._indices_selected np.random.shuffle(indices) return iter(indices) def __len__(self): return self._length class ImageFolderDataset(dataset.Dataset):", "always convert loaded images to colored (3 channels). transform : callable, default None", "self._pseudo_labels[idx] return img, label, idx, pseudo_label return img, label, idx def __len__(self): return", "in sorted(os.listdir(root)): path = os.path.join(root, folder) if not os.path.isdir(path): warnings.warn('Ignoring %s, which is", "Chen # @File : datasets.py # @Software: PyCharm import os, warnings from mxnet.gluon.data", "np class IdxSampler(sampler.Sampler): \"\"\"Samples elements from [0, length) randomly without replacement. Parameters ----------", "ext.lower() not in self._exts: warnings.warn('Ignoring %s of type %s. Only support %s'%( filename,", "len(self.synsets) self.synsets.append(folder) for filename in sorted(os.listdir(path)): filename = os.path.join(path, filename) ext = os.path.splitext(filename)[1]", "= [] self.items = [] for folder in sorted(os.listdir(root)): path = os.path.join(root, folder)", "indices_selected self._length = indices_selected.shape[0] def __iter__(self): indices = self._indices_selected np.random.shuffle(indices) return iter(indices) def", "iter(indices) def __len__(self): return self._length class ImageFolderDataset(dataset.Dataset): \"\"\"A dataset for loading image files", "def __init__(self, root, flag=1, transform=None, pseudo_labels=None): self._root = os.path.expanduser(root) self._flag = flag self._transform", "= os.path.splitext(filename)[1] if ext.lower() not in self._exts: warnings.warn('Ignoring %s of type %s. Only", "folder in sorted(os.listdir(root)): path = os.path.join(root, folder) if not os.path.isdir(path): warnings.warn('Ignoring %s, which", "os.path.isdir(path): warnings.warn('Ignoring %s, which is not a directory.'%path, stacklevel=3) continue label = len(self.synsets)", "np.random.shuffle(indices) return iter(indices) def __len__(self): return self._length class ImageFolderDataset(dataset.Dataset): \"\"\"A dataset for loading", "\"\"\" def __init__(self, indices_selected): if isinstance(indices_selected, list): indices_selected = np.array(indices_selected) self._indices_selected = indices_selected", "'.join(self._exts))) continue self.items.append((filename, label)) def __getitem__(self, idx): img = image.imread(self.items[idx][0], self._flag) label =", ": datasets.py # @Software: PyCharm import os, warnings from mxnet.gluon.data import dataset, sampler", "label) pairs. 
\"\"\" def __init__(self, root, flag=1, transform=None, pseudo_labels=None): self._root = os.path.expanduser(root) self._flag", "(3 channels). transform : callable, default None A function that takes data and", "if isinstance(indices_selected, list): indices_selected = np.array(indices_selected) self._indices_selected = indices_selected self._length = indices_selected.shape[0] def", "for folder in sorted(os.listdir(root)): path = os.path.join(root, folder) if not os.path.isdir(path): warnings.warn('Ignoring %s,", "Parameters ---------- root : str Path to root directory. flag : {0, 1},", "not a directory.'%path, stacklevel=3) continue label = len(self.synsets) self.synsets.append(folder) for filename in sorted(os.listdir(path)):", "= os.path.expanduser(root) self._flag = flag self._transform = transform self._exts = ['.jpg', '.jpeg', '.png']", "Parameters ---------- length : int Length of the sequence. \"\"\" def __init__(self, indices_selected):", "pseudo_label = self._pseudo_labels[idx] return img, label, idx, pseudo_label return img, label, idx def", "self._pseudo_labels = pseudo_labels def _list_images(self, root): self.synsets = [] self.items = [] for", "folder) if not os.path.isdir(path): warnings.warn('Ignoring %s, which is not a directory.'%path, stacklevel=3) continue", "self.items = [] for folder in sorted(os.listdir(root)): path = os.path.join(root, folder) if not", "List of class names. `synsets[i]` is the name for the integer label `i`", "def __iter__(self): indices = self._indices_selected np.random.shuffle(indices) return iter(indices) def __len__(self): return self._length class", "coding: utf-8 -*- # @Time : 2020/2/12 15:47 # @Author : Chen #", "class IdxSampler(sampler.Sampler): \"\"\"Samples elements from [0, length) randomly without replacement. Parameters ---------- length", "return self._transform(img, label) if self._pseudo_labels is not None: pseudo_label = self._pseudo_labels[idx] return img,", "[0, length) randomly without replacement. Parameters ---------- length : int Length of the", "List of all images in (filename, label) pairs. \"\"\" def __init__(self, root, flag=1,", "os, warnings from mxnet.gluon.data import dataset, sampler from mxnet import image import numpy", "PyCharm import os, warnings from mxnet.gluon.data import dataset, sampler from mxnet import image", "(1 channel). If 1, always convert loaded images to colored (3 channels). transform", "-*- # @Time : 2020/2/12 15:47 # @Author : Chen # @File :", "indices_selected.shape[0] def __iter__(self): indices = self._indices_selected np.random.shuffle(indices) return iter(indices) def __len__(self): return self._length", "image files stored in a folder structure. like:: root/car/0001.jpg root/car/xxxa.jpg root/car/yyyb.jpg root/bus/123.jpg root/bus/023.jpg", "dataset for loading image files stored in a folder structure. like:: root/car/0001.jpg root/car/xxxa.jpg", "a folder structure. like:: root/car/0001.jpg root/car/xxxa.jpg root/car/yyyb.jpg root/bus/123.jpg root/bus/023.jpg root/bus/wwww.jpg Parameters ---------- root", "0, always convert loaded images to greyscale (1 channel). If 1, always convert", "# @Time : 2020/2/12 15:47 # @Author : Chen # @File : datasets.py", "label) if self._pseudo_labels is not None: pseudo_label = self._pseudo_labels[idx] return img, label, idx,", "files stored in a folder structure. like:: root/car/0001.jpg root/car/xxxa.jpg root/car/yyyb.jpg root/bus/123.jpg root/bus/023.jpg root/bus/wwww.jpg", "loaded images to colored (3 channels). 
transform : callable, default None A function", "self._exts = ['.jpg', '.jpeg', '.png'] self._list_images(self._root) self._pseudo_labels = pseudo_labels def _list_images(self, root): self.synsets", "if self._pseudo_labels is not None: pseudo_label = self._pseudo_labels[idx] return img, label, idx, pseudo_label", "= self.items[idx][1] if self._transform is not None: return self._transform(img, label) if self._pseudo_labels is", "def _list_images(self, root): self.synsets = [] self.items = [] for folder in sorted(os.listdir(root)):", "directory.'%path, stacklevel=3) continue label = len(self.synsets) self.synsets.append(folder) for filename in sorted(os.listdir(path)): filename =", "A function that takes data and label and transforms them:: transform = lambda", "= self._pseudo_labels[idx] return img, label, idx, pseudo_label return img, label, idx def __len__(self):", "1}, default 1 If 0, always convert loaded images to greyscale (1 channel).", "takes data and label and transforms them:: transform = lambda data, label: (data.astype(np.float32)/255,", "replacement. Parameters ---------- length : int Length of the sequence. \"\"\" def __init__(self,", "# @File : datasets.py # @Software: PyCharm import os, warnings from mxnet.gluon.data import", "is not a directory.'%path, stacklevel=3) continue label = len(self.synsets) self.synsets.append(folder) for filename in", "if ext.lower() not in self._exts: warnings.warn('Ignoring %s of type %s. Only support %s'%(" ]
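The sampler and dataset above plug straight into gluon's DataLoader. A minimal usage sketch, assuming only that mxnet is installed; the arrays are synthetic stand-ins for real images and labels, and IdxSampler refers to the class defined above:

import numpy as np
from mxnet.gluon.data import ArrayDataset, DataLoader

# Synthetic 10-sample dataset standing in for real image/label pairs.
data = np.arange(20, dtype=np.float32).reshape(10, 2)
labels = np.arange(10)
ds = ArrayDataset(data, labels)

# Restrict iteration to four chosen samples, visited in a new random order each epoch.
loader = DataLoader(ds, batch_size=2, sampler=IdxSampler([0, 3, 5, 7]))

for batch_data, batch_labels in loader:
    print(batch_data.shape, batch_labels.asnumpy())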
[ "# Get the sum of the tracts tracts_sum = add_tracts(tracts, tracts_to_sum_index) # Save", "affine = img.get_affine() np_matrix_3d_nii = nib.Nifti1Image(np_matrix_3d,affine) nib.save(np_matrix_3d_nii, output_image) # copy geometric information sct.run('fslcpgeom", "contains scripts, to be able to load modules sys.path.append(os.path.join(path_sct, 'scripts')) import sct_utils as", "in info_label.txt text_label = '\\n'+str(nb_tracts)+', CSF, '+file_csf io.open(os.path.join(folder_atlas, file_label) 'a+b').write(text_label) def get_tracts(tracts_folder): \"\"\"Loads", "# add line in info_label.txt text_label = '\\n'+str(nb_tracts)+', CSF, '+file_csf io.open(os.path.join(folder_atlas, file_label) 'a+b').write(text_label)", "<filename>dev/atlas/create_atlas/create_masks_csf_and_gm.py<gh_stars>1-10 #!/usr/bin/env python # create masks of CSF and gray matter # Author:", "to the undefined values in WM atlas import sys, io, os, glob import", "as object because there are 4 dimensions tracts = np.empty([len(fname_tract), 1], dtype=object) #Load", "# output_image is the name of the niftii image created, ex: '3D_matrix.nii.gz' img", "get_tracts(tracts_folder): \"\"\"Loads tracts in an atlas folder and converts them from .nii.gz format", "tracts def save_3D_nparray_nifti(np_matrix_3d, output_image, fname_atlas): # Save 3d numpy matrix to niftii image", "\"*.nii.gz\")) #Initialise tracts variable as object because there are 4 dimensions tracts =", "is the name of the niftii image created, ex: '3D_matrix.nii.gz' img = nib.Nifti1Image(np_matrix_3d,", "an atlas folder and converts them from .nii.gz format to numpy ndarray Save", "mask to obtain CSF mask sct.run('fslmaths tmp.WM_all_bin_dil -sub tmp.WM_all '+os.path.join(os.path.join(folder_atlas, file_csf))) # add", "= \"WMtract__gm.nii.gz\" file_label = 'info_label.txt' def main(): # Extract the tracts from the", "get_tracts(folder_atlas) nb_tracts = len(tracts) # Get the sum of the tracts tracts_sum =", "nib path_sct = os.environ.get(\"SCT_DIR\", os.path.dirname(os.path.dirname(__file__))) # append path that contains scripts, to be", "tracts for label in range(0, len(fname_tract)): tracts[label, 0] = nib.load(fname_tract[label]).get_data() #Reshape tracts if", "= os.environ.get(\"SCT_DIR\", os.path.dirname(os.path.dirname(__file__))) # append path that contains scripts, to be able to", "tracts_to_sum_index = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 folder_atlas = os.path.join(\"WMtracts_outputs\", \"final_results\") file_csf = \"WMtract__csf.nii.gz\" file_gm = \"WMtract__gm.nii.gz\"", "matter # Author: <EMAIL> # Created: 2014-12-06 # TODO: get GM # TODO:", "dimensions tracts = np.empty([len(fname_tract), 1], dtype=object) #Load each partial volumes of each tracts", "in WM atlas import sys, io, os, glob import numpy as np import", "add_tracts(tracts, tracts_to_sum_index) # Save sum of the tracts to niftii save_3D_nparray_nifti(tracts_sum, 'tmp.WM_all.nii.gz', os.path.join(folder_atlas,", "subtract WM mask to obtain CSF mask sct.run('fslmaths tmp.WM_all_bin_dil -sub tmp.WM_all '+os.path.join(os.path.join(folder_atlas, file_csf)))", "2014-12-06 # TODO: get GM # TODO: add tract corresponding to the undefined", "dilate it sct.run('fslmaths tmp.WM_all_bin.nii.gz -kernel boxv 5x5x1 -dilM tmp.WM_all_bin_dil.nii.gz') # subtract WM mask", "tmp.WM_all '+os.path.join(os.path.join(folder_atlas, file_csf))) # add line in info_label.txt text_label = '\\n'+str(nb_tracts)+', CSF, '+file_csf", "to load modules 
sys.path.append(os.path.join(path_sct, 'scripts')) import sct_utils as sct # parameters tracts_to_sum_index =", "# dilate it sct.run('fslmaths tmp.WM_all_bin.nii.gz -kernel boxv 5x5x1 -dilM tmp.WM_all_bin_dil.nii.gz') # subtract WM", "from .nii.gz format to numpy ndarray Save path of each tracts Only the", "add tract corresponding to the undefined values in WM atlas import sys, io,", "Only the tract must be in tracts_format in the folder\"\"\" fname_tract = glob.glob(os.path.join(tracts_folder,", "label in range(0, len(fname_tract)): tracts[label, 0] = nib.load(fname_tract[label]).get_data() #Reshape tracts if it is", "numpy matrix to niftii image # np_matrix_3d is a 3D numpy ndarray #", "for label in range(0, len(fname_tract)): tracts[label, 0] = nib.load(fname_tract[label]).get_data() #Reshape tracts if it", "tracts[label,0].reshape(int(np.size(tracts[label,0],0)), int(np.size(tracts[label,0],1)),1) return tracts def save_3D_nparray_nifti(np_matrix_3d, output_image, fname_atlas): # Save 3d numpy matrix", "'+file_csf io.open(os.path.join(folder_atlas, file_label) 'a+b').write(text_label) def get_tracts(tracts_folder): \"\"\"Loads tracts in an atlas folder and", "0] = nib.load(fname_tract[label]).get_data() #Reshape tracts if it is the 2D image instead of", "\"\"\"Loads tracts in an atlas folder and converts them from .nii.gz format to", "# np_matrix_3d is a 3D numpy ndarray # output_image is the name of", "matrix to niftii image # np_matrix_3d is a 3D numpy ndarray # output_image", "main(): # Extract the tracts from the atlas' folder tracts = get_tracts(folder_atlas) nb_tracts", "must be in tracts_format in the folder\"\"\" fname_tract = glob.glob(os.path.join(tracts_folder, \"*.nii.gz\")) #Initialise tracts", "# Created: 2014-12-06 # TODO: get GM # TODO: add tract corresponding to", "'\\n'+str(nb_tracts)+', CSF, '+file_csf io.open(os.path.join(folder_atlas, file_label) 'a+b').write(text_label) def get_tracts(tracts_folder): \"\"\"Loads tracts in an atlas", "gray matter # Author: <EMAIL> # Created: 2014-12-06 # TODO: get GM #", "def main(): # Extract the tracts from the atlas' folder tracts = get_tracts(folder_atlas)", "= nib.Nifti1Image(np_matrix_3d,affine) nib.save(np_matrix_3d_nii, output_image) # copy geometric information sct.run('fslcpgeom '+fname_atlas+' '+output_image, verbose=0) def", "path of each tracts Only the tract must be in tracts_format in the", "boxv 5x5x1 -dilM tmp.WM_all_bin_dil.nii.gz') # subtract WM mask to obtain CSF mask sct.run('fslmaths", "of the tracts tracts_sum = add_tracts(tracts, tracts_to_sum_index) # Save sum of the tracts", "'3D_matrix.nii.gz' img = nib.Nifti1Image(np_matrix_3d, np.eye(4)) affine = img.get_affine() np_matrix_3d_nii = nib.Nifti1Image(np_matrix_3d,affine) nib.save(np_matrix_3d_nii, output_image)", "that contains scripts, to be able to load modules sys.path.append(os.path.join(path_sct, 'scripts')) import sct_utils", "atlas' folder tracts = get_tracts(folder_atlas) nb_tracts = len(tracts) # Get the sum of", "4 dimensions tracts = np.empty([len(fname_tract), 1], dtype=object) #Load each partial volumes of each", "nib.save(np_matrix_3d_nii, output_image) # copy geometric information sct.run('fslcpgeom '+fname_atlas+' '+output_image, verbose=0) def add_tracts(tracts, tracts_to_sum_index):", "parameters tracts_to_sum_index = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 folder_atlas = os.path.join(\"WMtracts_outputs\", \"final_results\") file_csf = \"WMtract__csf.nii.gz\" file_gm =", "in the folder\"\"\" 
fname_tract = glob.glob(os.path.join(tracts_folder, \"*.nii.gz\")) #Initialise tracts variable as object because", "output_image, fname_atlas): # Save 3d numpy matrix to niftii image # np_matrix_3d is", "len(fname_tract)): if (tracts[label,0]).ndim == 2: tracts[label,0] = tracts[label,0].reshape(int(np.size(tracts[label,0],0)), int(np.size(tracts[label,0],1)),1) return tracts def save_3D_nparray_nifti(np_matrix_3d,", "nibabel as nib path_sct = os.environ.get(\"SCT_DIR\", os.path.dirname(os.path.dirname(__file__))) # append path that contains scripts,", "'scripts')) import sct_utils as sct # parameters tracts_to_sum_index = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 folder_atlas = os.path.join(\"WMtracts_outputs\",", "created, ex: '3D_matrix.nii.gz' img = nib.Nifti1Image(np_matrix_3d, np.eye(4)) affine = img.get_affine() np_matrix_3d_nii = nib.Nifti1Image(np_matrix_3d,affine)", "instead of 3D for label in range(0, len(fname_tract)): if (tracts[label,0]).ndim == 2: tracts[label,0]", "object because there are 4 dimensions tracts = np.empty([len(fname_tract), 1], dtype=object) #Load each", "tracts if it is the 2D image instead of 3D for label in", "tracts to niftii save_3D_nparray_nifti(tracts_sum, 'tmp.WM_all.nii.gz', os.path.join(folder_atlas, \"WMtract__00.nii.gz\")) # binarize it sct.run('fslmaths tmp.WM_all.nii.gz -thr", "= np.empty((tracts[0, 0]).shape) for i in tracts_to_sum_index: tracts_sum = tracts_sum + tracts[i, 0]", "tmp.WM_all_bin.nii.gz') # dilate it sct.run('fslmaths tmp.WM_all_bin.nii.gz -kernel boxv 5x5x1 -dilM tmp.WM_all_bin_dil.nii.gz') # subtract", "-thr 0.5 -bin tmp.WM_all_bin.nii.gz') # dilate it sct.run('fslmaths tmp.WM_all_bin.nii.gz -kernel boxv 5x5x1 -dilM", "and gray matter # Author: <EMAIL> # Created: 2014-12-06 # TODO: get GM", "it sct.run('fslmaths tmp.WM_all.nii.gz -thr 0.5 -bin tmp.WM_all_bin.nii.gz') # dilate it sct.run('fslmaths tmp.WM_all_bin.nii.gz -kernel", "Extract the tracts from the atlas' folder tracts = get_tracts(folder_atlas) nb_tracts = len(tracts)", "converts them from .nii.gz format to numpy ndarray Save path of each tracts", "the tracts to niftii save_3D_nparray_nifti(tracts_sum, 'tmp.WM_all.nii.gz', os.path.join(folder_atlas, \"WMtract__00.nii.gz\")) # binarize it sct.run('fslmaths tmp.WM_all.nii.gz", "the undefined values in WM atlas import sys, io, os, glob import numpy", "# TODO: add tract corresponding to the undefined values in WM atlas import", "modules sys.path.append(os.path.join(path_sct, 'scripts')) import sct_utils as sct # parameters tracts_to_sum_index = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 folder_atlas", "a 3D numpy ndarray # output_image is the name of the niftii image", "# copy geometric information sct.run('fslcpgeom '+fname_atlas+' '+output_image, verbose=0) def add_tracts(tracts, tracts_to_sum_index): tracts_sum =", "# parameters tracts_to_sum_index = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 folder_atlas = os.path.join(\"WMtracts_outputs\", \"final_results\") file_csf = \"WMtract__csf.nii.gz\" file_gm", "import sct_utils as sct # parameters tracts_to_sum_index = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 folder_atlas = os.path.join(\"WMtracts_outputs\", \"final_results\")", "tmp.WM_all.nii.gz -thr 0.5 -bin tmp.WM_all_bin.nii.gz') # dilate it sct.run('fslmaths tmp.WM_all_bin.nii.gz -kernel boxv 5x5x1", "the sum of the tracts tracts_sum = add_tracts(tracts, 
tracts_to_sum_index) # Save sum of", "range(0, len(fname_tract)): tracts[label, 0] = nib.load(fname_tract[label]).get_data() #Reshape tracts if it is the 2D", "image created, ex: '3D_matrix.nii.gz' img = nib.Nifti1Image(np_matrix_3d, np.eye(4)) affine = img.get_affine() np_matrix_3d_nii =", "tract corresponding to the undefined values in WM atlas import sys, io, os,", "folder tracts = get_tracts(folder_atlas) nb_tracts = len(tracts) # Get the sum of the", "-dilM tmp.WM_all_bin_dil.nii.gz') # subtract WM mask to obtain CSF mask sct.run('fslmaths tmp.WM_all_bin_dil -sub", "# subtract WM mask to obtain CSF mask sct.run('fslmaths tmp.WM_all_bin_dil -sub tmp.WM_all '+os.path.join(os.path.join(folder_atlas,", "it sct.run('fslmaths tmp.WM_all_bin.nii.gz -kernel boxv 5x5x1 -dilM tmp.WM_all_bin_dil.nii.gz') # subtract WM mask to", "== 2: tracts[label,0] = tracts[label,0].reshape(int(np.size(tracts[label,0],0)), int(np.size(tracts[label,0],1)),1) return tracts def save_3D_nparray_nifti(np_matrix_3d, output_image, fname_atlas): #", "2D image instead of 3D for label in range(0, len(fname_tract)): if (tracts[label,0]).ndim ==", "os.path.join(\"WMtracts_outputs\", \"final_results\") file_csf = \"WMtract__csf.nii.gz\" file_gm = \"WMtract__gm.nii.gz\" file_label = 'info_label.txt' def main():", "#Initialise tracts variable as object because there are 4 dimensions tracts = np.empty([len(fname_tract),", "for label in range(0, len(fname_tract)): if (tracts[label,0]).ndim == 2: tracts[label,0] = tracts[label,0].reshape(int(np.size(tracts[label,0],0)), int(np.size(tracts[label,0],1)),1)", "2: tracts[label,0] = tracts[label,0].reshape(int(np.size(tracts[label,0],0)), int(np.size(tracts[label,0],1)),1) return tracts def save_3D_nparray_nifti(np_matrix_3d, output_image, fname_atlas): # Save", "as sct # parameters tracts_to_sum_index = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 folder_atlas = os.path.join(\"WMtracts_outputs\", \"final_results\") file_csf =", "image # np_matrix_3d is a 3D numpy ndarray # output_image is the name", "file_gm = \"WMtract__gm.nii.gz\" file_label = 'info_label.txt' def main(): # Extract the tracts from", "sct.run('fslcpgeom '+fname_atlas+' '+output_image, verbose=0) def add_tracts(tracts, tracts_to_sum_index): tracts_sum = np.empty((tracts[0, 0]).shape) for i", "save_3D_nparray_nifti(tracts_sum, 'tmp.WM_all.nii.gz', os.path.join(folder_atlas, \"WMtract__00.nii.gz\")) # binarize it sct.run('fslmaths tmp.WM_all.nii.gz -thr 0.5 -bin tmp.WM_all_bin.nii.gz')", "'+output_image, verbose=0) def add_tracts(tracts, tracts_to_sum_index): tracts_sum = np.empty((tracts[0, 0]).shape) for i in tracts_to_sum_index:", "numpy ndarray Save path of each tracts Only the tract must be in", "verbose=0) def add_tracts(tracts, tracts_to_sum_index): tracts_sum = np.empty((tracts[0, 0]).shape) for i in tracts_to_sum_index: tracts_sum", "output_image is the name of the niftii image created, ex: '3D_matrix.nii.gz' img =", "add_tracts(tracts, tracts_to_sum_index): tracts_sum = np.empty((tracts[0, 0]).shape) for i in tracts_to_sum_index: tracts_sum = tracts_sum", "tracts_to_sum_index): tracts_sum = np.empty((tracts[0, 0]).shape) for i in tracts_to_sum_index: tracts_sum = tracts_sum +", "-bin tmp.WM_all_bin.nii.gz') # dilate it sct.run('fslmaths tmp.WM_all_bin.nii.gz -kernel boxv 5x5x1 -dilM tmp.WM_all_bin_dil.nii.gz') #", "= np.empty([len(fname_tract), 1], dtype=object) #Load each partial volumes of each tracts for label", "text_label = '\\n'+str(nb_tracts)+', CSF, '+file_csf 
io.open(os.path.join(folder_atlas, file_label) 'a+b').write(text_label) def get_tracts(tracts_folder): \"\"\"Loads tracts in", "glob.glob(os.path.join(tracts_folder, \"*.nii.gz\")) #Initialise tracts variable as object because there are 4 dimensions tracts", "-kernel boxv 5x5x1 -dilM tmp.WM_all_bin_dil.nii.gz') # subtract WM mask to obtain CSF mask", "file_csf = \"WMtract__csf.nii.gz\" file_gm = \"WMtract__gm.nii.gz\" file_label = 'info_label.txt' def main(): # Extract", "masks of CSF and gray matter # Author: <EMAIL> # Created: 2014-12-06 #", "label in range(0, len(fname_tract)): if (tracts[label,0]).ndim == 2: tracts[label,0] = tracts[label,0].reshape(int(np.size(tracts[label,0],0)), int(np.size(tracts[label,0],1)),1) return", "tracts in an atlas folder and converts them from .nii.gz format to numpy", "in tracts_format in the folder\"\"\" fname_tract = glob.glob(os.path.join(tracts_folder, \"*.nii.gz\")) #Initialise tracts variable as", "int(np.size(tracts[label,0],1)),1) return tracts def save_3D_nparray_nifti(np_matrix_3d, output_image, fname_atlas): # Save 3d numpy matrix to", "WM atlas import sys, io, os, glob import numpy as np import nibabel", "tracts_format in the folder\"\"\" fname_tract = glob.glob(os.path.join(tracts_folder, \"*.nii.gz\")) #Initialise tracts variable as object", "#Load each partial volumes of each tracts for label in range(0, len(fname_tract)): tracts[label,", "geometric information sct.run('fslcpgeom '+fname_atlas+' '+output_image, verbose=0) def add_tracts(tracts, tracts_to_sum_index): tracts_sum = np.empty((tracts[0, 0]).shape)", "np.eye(4)) affine = img.get_affine() np_matrix_3d_nii = nib.Nifti1Image(np_matrix_3d,affine) nib.save(np_matrix_3d_nii, output_image) # copy geometric information", "# create masks of CSF and gray matter # Author: <EMAIL> # Created:", "len(fname_tract)): tracts[label, 0] = nib.load(fname_tract[label]).get_data() #Reshape tracts if it is the 2D image", "= os.path.join(\"WMtracts_outputs\", \"final_results\") file_csf = \"WMtract__csf.nii.gz\" file_gm = \"WMtract__gm.nii.gz\" file_label = 'info_label.txt' def", "sct.run('fslmaths tmp.WM_all_bin.nii.gz -kernel boxv 5x5x1 -dilM tmp.WM_all_bin_dil.nii.gz') # subtract WM mask to obtain", "WM mask to obtain CSF mask sct.run('fslmaths tmp.WM_all_bin_dil -sub tmp.WM_all '+os.path.join(os.path.join(folder_atlas, file_csf))) #", "Created: 2014-12-06 # TODO: get GM # TODO: add tract corresponding to the", "Save 3d numpy matrix to niftii image # np_matrix_3d is a 3D numpy", "each tracts for label in range(0, len(fname_tract)): tracts[label, 0] = nib.load(fname_tract[label]).get_data() #Reshape tracts", "img.get_affine() np_matrix_3d_nii = nib.Nifti1Image(np_matrix_3d,affine) nib.save(np_matrix_3d_nii, output_image) # copy geometric information sct.run('fslcpgeom '+fname_atlas+' '+output_image,", "np_matrix_3d_nii = nib.Nifti1Image(np_matrix_3d,affine) nib.save(np_matrix_3d_nii, output_image) # copy geometric information sct.run('fslcpgeom '+fname_atlas+' '+output_image, verbose=0)", "to be able to load modules sys.path.append(os.path.join(path_sct, 'scripts')) import sct_utils as sct #", "the name of the niftii image created, ex: '3D_matrix.nii.gz' img = nib.Nifti1Image(np_matrix_3d, np.eye(4))", "atlas folder and converts them from .nii.gz format to numpy ndarray Save path", "ex: '3D_matrix.nii.gz' img = nib.Nifti1Image(np_matrix_3d, np.eye(4)) affine = img.get_affine() np_matrix_3d_nii = nib.Nifti1Image(np_matrix_3d,affine) nib.save(np_matrix_3d_nii,", "= nib.Nifti1Image(np_matrix_3d, 
np.eye(4)) affine = img.get_affine() np_matrix_3d_nii = nib.Nifti1Image(np_matrix_3d,affine) nib.save(np_matrix_3d_nii, output_image) # copy", "= \"WMtract__csf.nii.gz\" file_gm = \"WMtract__gm.nii.gz\" file_label = 'info_label.txt' def main(): # Extract the", "folder_atlas = os.path.join(\"WMtracts_outputs\", \"final_results\") file_csf = \"WMtract__csf.nii.gz\" file_gm = \"WMtract__gm.nii.gz\" file_label = 'info_label.txt'", "3D numpy ndarray # output_image is the name of the niftii image created,", "= img.get_affine() np_matrix_3d_nii = nib.Nifti1Image(np_matrix_3d,affine) nib.save(np_matrix_3d_nii, output_image) # copy geometric information sct.run('fslcpgeom '+fname_atlas+'", "# TODO: get GM # TODO: add tract corresponding to the undefined values", "copy geometric information sct.run('fslcpgeom '+fname_atlas+' '+output_image, verbose=0) def add_tracts(tracts, tracts_to_sum_index): tracts_sum = np.empty((tracts[0,", "CSF and gray matter # Author: <EMAIL> # Created: 2014-12-06 # TODO: get", "'info_label.txt' def main(): # Extract the tracts from the atlas' folder tracts =", "= glob.glob(os.path.join(tracts_folder, \"*.nii.gz\")) #Initialise tracts variable as object because there are 4 dimensions", "in range(0, len(fname_tract)): if (tracts[label,0]).ndim == 2: tracts[label,0] = tracts[label,0].reshape(int(np.size(tracts[label,0],0)), int(np.size(tracts[label,0],1)),1) return tracts", "path_sct = os.environ.get(\"SCT_DIR\", os.path.dirname(os.path.dirname(__file__))) # append path that contains scripts, to be able", "tracts = np.empty([len(fname_tract), 1], dtype=object) #Load each partial volumes of each tracts for", "able to load modules sys.path.append(os.path.join(path_sct, 'scripts')) import sct_utils as sct # parameters tracts_to_sum_index", "<EMAIL> # Created: 2014-12-06 # TODO: get GM # TODO: add tract corresponding", "len(tracts) # Get the sum of the tracts tracts_sum = add_tracts(tracts, tracts_to_sum_index) #", "tracts Only the tract must be in tracts_format in the folder\"\"\" fname_tract =", "niftii save_3D_nparray_nifti(tracts_sum, 'tmp.WM_all.nii.gz', os.path.join(folder_atlas, \"WMtract__00.nii.gz\")) # binarize it sct.run('fslmaths tmp.WM_all.nii.gz -thr 0.5 -bin", "tracts variable as object because there are 4 dimensions tracts = np.empty([len(fname_tract), 1],", "i in tracts_to_sum_index: tracts_sum = tracts_sum + tracts[i, 0] return tracts_sum if __name__", "image instead of 3D for label in range(0, len(fname_tract)): if (tracts[label,0]).ndim == 2:", "numpy ndarray # output_image is the name of the niftii image created, ex:", "# binarize it sct.run('fslmaths tmp.WM_all.nii.gz -thr 0.5 -bin tmp.WM_all_bin.nii.gz') # dilate it sct.run('fslmaths", "np import nibabel as nib path_sct = os.environ.get(\"SCT_DIR\", os.path.dirname(os.path.dirname(__file__))) # append path that", "save_3D_nparray_nifti(np_matrix_3d, output_image, fname_atlas): # Save 3d numpy matrix to niftii image # np_matrix_3d", "3D for label in range(0, len(fname_tract)): if (tracts[label,0]).ndim == 2: tracts[label,0] = tracts[label,0].reshape(int(np.size(tracts[label,0],0)),", "nib.Nifti1Image(np_matrix_3d,affine) nib.save(np_matrix_3d_nii, output_image) # copy geometric information sct.run('fslcpgeom '+fname_atlas+' '+output_image, verbose=0) def add_tracts(tracts,", "file_label = 'info_label.txt' def main(): # Extract the tracts from the atlas' folder", ".nii.gz format to numpy ndarray Save path of each tracts Only the tract", "tracts[label,0] = 
tracts[label,0].reshape(int(np.size(tracts[label,0],0)), int(np.size(tracts[label,0],1)),1) return tracts def save_3D_nparray_nifti(np_matrix_3d, output_image, fname_atlas): # Save 3d", "to niftii image # np_matrix_3d is a 3D numpy ndarray # output_image is", "#!/usr/bin/env python # create masks of CSF and gray matter # Author: <EMAIL>", "them from .nii.gz format to numpy ndarray Save path of each tracts Only", "1], dtype=object) #Load each partial volumes of each tracts for label in range(0,", "sys.path.append(os.path.join(path_sct, 'scripts')) import sct_utils as sct # parameters tracts_to_sum_index = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 folder_atlas =", "volumes of each tracts for label in range(0, len(fname_tract)): tracts[label, 0] = nib.load(fname_tract[label]).get_data()", "because there are 4 dimensions tracts = np.empty([len(fname_tract), 1], dtype=object) #Load each partial", "python # create masks of CSF and gray matter # Author: <EMAIL> #", "#Reshape tracts if it is the 2D image instead of 3D for label", "niftii image created, ex: '3D_matrix.nii.gz' img = nib.Nifti1Image(np_matrix_3d, np.eye(4)) affine = img.get_affine() np_matrix_3d_nii", "values in WM atlas import sys, io, os, glob import numpy as np", "as np import nibabel as nib path_sct = os.environ.get(\"SCT_DIR\", os.path.dirname(os.path.dirname(__file__))) # append path", "it is the 2D image instead of 3D for label in range(0, len(fname_tract)):", "in range(0, len(fname_tract)): tracts[label, 0] = nib.load(fname_tract[label]).get_data() #Reshape tracts if it is the", "output_image) # copy geometric information sct.run('fslcpgeom '+fname_atlas+' '+output_image, verbose=0) def add_tracts(tracts, tracts_to_sum_index): tracts_sum", "of the niftii image created, ex: '3D_matrix.nii.gz' img = nib.Nifti1Image(np_matrix_3d, np.eye(4)) affine =", "in tracts_to_sum_index: tracts_sum = tracts_sum + tracts[i, 0] return tracts_sum if __name__ ==", "if it is the 2D image instead of 3D for label in range(0,", "sum of the tracts to niftii save_3D_nparray_nifti(tracts_sum, 'tmp.WM_all.nii.gz', os.path.join(folder_atlas, \"WMtract__00.nii.gz\")) # binarize it", "-sub tmp.WM_all '+os.path.join(os.path.join(folder_atlas, file_csf))) # add line in info_label.txt text_label = '\\n'+str(nb_tracts)+', CSF,", "as nib path_sct = os.environ.get(\"SCT_DIR\", os.path.dirname(os.path.dirname(__file__))) # append path that contains scripts, to", "GM # TODO: add tract corresponding to the undefined values in WM atlas", "np.empty((tracts[0, 0]).shape) for i in tracts_to_sum_index: tracts_sum = tracts_sum + tracts[i, 0] return", "tmp.WM_all_bin_dil -sub tmp.WM_all '+os.path.join(os.path.join(folder_atlas, file_csf))) # add line in info_label.txt text_label = '\\n'+str(nb_tracts)+',", "tracts_sum = tracts_sum + tracts[i, 0] return tracts_sum if __name__ == \"__main__\": main()", "fname_atlas): # Save 3d numpy matrix to niftii image # np_matrix_3d is a", "def save_3D_nparray_nifti(np_matrix_3d, output_image, fname_atlas): # Save 3d numpy matrix to niftii image #", "be in tracts_format in the folder\"\"\" fname_tract = glob.glob(os.path.join(tracts_folder, \"*.nii.gz\")) #Initialise tracts variable", "of each tracts Only the tract must be in tracts_format in the folder\"\"\"", "tracts from the atlas' folder tracts = get_tracts(folder_atlas) nb_tracts = len(tracts) # Get", "sum of the tracts tracts_sum = add_tracts(tracts, tracts_to_sum_index) # Save sum of the", "= 'info_label.txt' def main(): # Extract the tracts 
from the atlas' folder tracts", "0]).shape) for i in tracts_to_sum_index: tracts_sum = tracts_sum + tracts[i, 0] return tracts_sum", "= len(tracts) # Get the sum of the tracts tracts_sum = add_tracts(tracts, tracts_to_sum_index)", "# Extract the tracts from the atlas' folder tracts = get_tracts(folder_atlas) nb_tracts =", "import nibabel as nib path_sct = os.environ.get(\"SCT_DIR\", os.path.dirname(os.path.dirname(__file__))) # append path that contains", "the folder\"\"\" fname_tract = glob.glob(os.path.join(tracts_folder, \"*.nii.gz\")) #Initialise tracts variable as object because there", "= tracts[label,0].reshape(int(np.size(tracts[label,0],0)), int(np.size(tracts[label,0],1)),1) return tracts def save_3D_nparray_nifti(np_matrix_3d, output_image, fname_atlas): # Save 3d numpy", "of each tracts for label in range(0, len(fname_tract)): tracts[label, 0] = nib.load(fname_tract[label]).get_data() #Reshape", "range(0, len(fname_tract)): if (tracts[label,0]).ndim == 2: tracts[label,0] = tracts[label,0].reshape(int(np.size(tracts[label,0],0)), int(np.size(tracts[label,0],1)),1) return tracts def", "ndarray # output_image is the name of the niftii image created, ex: '3D_matrix.nii.gz'", "the 2D image instead of 3D for label in range(0, len(fname_tract)): if (tracts[label,0]).ndim", "and converts them from .nii.gz format to numpy ndarray Save path of each", "io, os, glob import numpy as np import nibabel as nib path_sct =", "sct.run('fslmaths tmp.WM_all.nii.gz -thr 0.5 -bin tmp.WM_all_bin.nii.gz') # dilate it sct.run('fslmaths tmp.WM_all_bin.nii.gz -kernel boxv", "'+fname_atlas+' '+output_image, verbose=0) def add_tracts(tracts, tracts_to_sum_index): tracts_sum = np.empty((tracts[0, 0]).shape) for i in", "info_label.txt text_label = '\\n'+str(nb_tracts)+', CSF, '+file_csf io.open(os.path.join(folder_atlas, file_label) 'a+b').write(text_label) def get_tracts(tracts_folder): \"\"\"Loads tracts", "tracts_sum = add_tracts(tracts, tracts_to_sum_index) # Save sum of the tracts to niftii save_3D_nparray_nifti(tracts_sum,", "'tmp.WM_all.nii.gz', os.path.join(folder_atlas, \"WMtract__00.nii.gz\")) # binarize it sct.run('fslmaths tmp.WM_all.nii.gz -thr 0.5 -bin tmp.WM_all_bin.nii.gz') #", "tracts_sum = np.empty((tracts[0, 0]).shape) for i in tracts_to_sum_index: tracts_sum = tracts_sum + tracts[i,", "sct.run('fslmaths tmp.WM_all_bin_dil -sub tmp.WM_all '+os.path.join(os.path.join(folder_atlas, file_csf))) # add line in info_label.txt text_label =", "for i in tracts_to_sum_index: tracts_sum = tracts_sum + tracts[i, 0] return tracts_sum if", "nib.load(fname_tract[label]).get_data() #Reshape tracts if it is the 2D image instead of 3D for", "CSF, '+file_csf io.open(os.path.join(folder_atlas, file_label) 'a+b').write(text_label) def get_tracts(tracts_folder): \"\"\"Loads tracts in an atlas folder", "undefined values in WM atlas import sys, io, os, glob import numpy as", "the tracts from the atlas' folder tracts = get_tracts(folder_atlas) nb_tracts = len(tracts) #", "if (tracts[label,0]).ndim == 2: tracts[label,0] = tracts[label,0].reshape(int(np.size(tracts[label,0],0)), int(np.size(tracts[label,0],1)),1) return tracts def save_3D_nparray_nifti(np_matrix_3d, output_image,", "# Author: <EMAIL> # Created: 2014-12-06 # TODO: get GM # TODO: add", "np.empty([len(fname_tract), 1], dtype=object) #Load each partial volumes of each tracts for label in", "corresponding to the undefined values in WM atlas import sys, io, os, glob", "there are 4 dimensions tracts = np.empty([len(fname_tract), 1], dtype=object) #Load each 
partial volumes", "load modules sys.path.append(os.path.join(path_sct, 'scripts')) import sct_utils as sct # parameters tracts_to_sum_index = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29", "of 3D for label in range(0, len(fname_tract)): if (tracts[label,0]).ndim == 2: tracts[label,0] =", "io.open(os.path.join(folder_atlas, file_label) 'a+b').write(text_label) def get_tracts(tracts_folder): \"\"\"Loads tracts in an atlas folder and converts", "information sct.run('fslcpgeom '+fname_atlas+' '+output_image, verbose=0) def add_tracts(tracts, tracts_to_sum_index): tracts_sum = np.empty((tracts[0, 0]).shape) for", "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 folder_atlas = os.path.join(\"WMtracts_outputs\", \"final_results\") file_csf = \"WMtract__csf.nii.gz\" file_gm = \"WMtract__gm.nii.gz\" file_label =", "binarize it sct.run('fslmaths tmp.WM_all.nii.gz -thr 0.5 -bin tmp.WM_all_bin.nii.gz') # dilate it sct.run('fslmaths tmp.WM_all_bin.nii.gz", "os.path.dirname(os.path.dirname(__file__))) # append path that contains scripts, to be able to load modules", "to obtain CSF mask sct.run('fslmaths tmp.WM_all_bin_dil -sub tmp.WM_all '+os.path.join(os.path.join(folder_atlas, file_csf))) # add line", "tracts[label, 0] = nib.load(fname_tract[label]).get_data() #Reshape tracts if it is the 2D image instead", "be able to load modules sys.path.append(os.path.join(path_sct, 'scripts')) import sct_utils as sct # parameters", "np_matrix_3d is a 3D numpy ndarray # output_image is the name of the", "line in info_label.txt text_label = '\\n'+str(nb_tracts)+', CSF, '+file_csf io.open(os.path.join(folder_atlas, file_label) 'a+b').write(text_label) def get_tracts(tracts_folder):", "3d numpy matrix to niftii image # np_matrix_3d is a 3D numpy ndarray", "nib.Nifti1Image(np_matrix_3d, np.eye(4)) affine = img.get_affine() np_matrix_3d_nii = nib.Nifti1Image(np_matrix_3d,affine) nib.save(np_matrix_3d_nii, output_image) # copy geometric", "glob import numpy as np import nibabel as nib path_sct = os.environ.get(\"SCT_DIR\", os.path.dirname(os.path.dirname(__file__)))", "append path that contains scripts, to be able to load modules sys.path.append(os.path.join(path_sct, 'scripts'))", "import sys, io, os, glob import numpy as np import nibabel as nib", "img = nib.Nifti1Image(np_matrix_3d, np.eye(4)) affine = img.get_affine() np_matrix_3d_nii = nib.Nifti1Image(np_matrix_3d,affine) nib.save(np_matrix_3d_nii, output_image) #", "tracts_to_sum_index: tracts_sum = tracts_sum + tracts[i, 0] return tracts_sum if __name__ == \"__main__\":", "name of the niftii image created, ex: '3D_matrix.nii.gz' img = nib.Nifti1Image(np_matrix_3d, np.eye(4)) affine", "of CSF and gray matter # Author: <EMAIL> # Created: 2014-12-06 # TODO:", "5x5x1 -dilM tmp.WM_all_bin_dil.nii.gz') # subtract WM mask to obtain CSF mask sct.run('fslmaths tmp.WM_all_bin_dil", "# Save sum of the tracts to niftii save_3D_nparray_nifti(tracts_sum, 'tmp.WM_all.nii.gz', os.path.join(folder_atlas, \"WMtract__00.nii.gz\")) #", "(tracts[label,0]).ndim == 2: tracts[label,0] = tracts[label,0].reshape(int(np.size(tracts[label,0],0)), int(np.size(tracts[label,0],1)),1) return tracts def save_3D_nparray_nifti(np_matrix_3d, output_image, fname_atlas):", "tracts tracts_sum = add_tracts(tracts, tracts_to_sum_index) # Save sum of the tracts to niftii", "return tracts def save_3D_nparray_nifti(np_matrix_3d, output_image, fname_atlas): # Save 3d numpy matrix to niftii", "\"WMtract__00.nii.gz\")) # binarize it 
sct.run('fslmaths tmp.WM_all.nii.gz -thr 0.5 -bin tmp.WM_all_bin.nii.gz') # dilate it", "partial volumes of each tracts for label in range(0, len(fname_tract)): tracts[label, 0] =", "path that contains scripts, to be able to load modules sys.path.append(os.path.join(path_sct, 'scripts')) import", "the tract must be in tracts_format in the folder\"\"\" fname_tract = glob.glob(os.path.join(tracts_folder, \"*.nii.gz\"))", "mask sct.run('fslmaths tmp.WM_all_bin_dil -sub tmp.WM_all '+os.path.join(os.path.join(folder_atlas, file_csf))) # add line in info_label.txt text_label", "create masks of CSF and gray matter # Author: <EMAIL> # Created: 2014-12-06", "atlas import sys, io, os, glob import numpy as np import nibabel as", "= nib.load(fname_tract[label]).get_data() #Reshape tracts if it is the 2D image instead of 3D", "\"WMtract__gm.nii.gz\" file_label = 'info_label.txt' def main(): # Extract the tracts from the atlas'", "Author: <EMAIL> # Created: 2014-12-06 # TODO: get GM # TODO: add tract", "is the 2D image instead of 3D for label in range(0, len(fname_tract)): if", "each tracts Only the tract must be in tracts_format in the folder\"\"\" fname_tract", "get GM # TODO: add tract corresponding to the undefined values in WM", "to numpy ndarray Save path of each tracts Only the tract must be", "in an atlas folder and converts them from .nii.gz format to numpy ndarray", "folder\"\"\" fname_tract = glob.glob(os.path.join(tracts_folder, \"*.nii.gz\")) #Initialise tracts variable as object because there are", "folder and converts them from .nii.gz format to numpy ndarray Save path of", "sct_utils as sct # parameters tracts_to_sum_index = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 folder_atlas = os.path.join(\"WMtracts_outputs\", \"final_results\") file_csf", "format to numpy ndarray Save path of each tracts Only the tract must", "Save sum of the tracts to niftii save_3D_nparray_nifti(tracts_sum, 'tmp.WM_all.nii.gz', os.path.join(folder_atlas, \"WMtract__00.nii.gz\")) # binarize", "'+os.path.join(os.path.join(folder_atlas, file_csf))) # add line in info_label.txt text_label = '\\n'+str(nb_tracts)+', CSF, '+file_csf io.open(os.path.join(folder_atlas,", "# append path that contains scripts, to be able to load modules sys.path.append(os.path.join(path_sct,", "obtain CSF mask sct.run('fslmaths tmp.WM_all_bin_dil -sub tmp.WM_all '+os.path.join(os.path.join(folder_atlas, file_csf))) # add line in", "CSF mask sct.run('fslmaths tmp.WM_all_bin_dil -sub tmp.WM_all '+os.path.join(os.path.join(folder_atlas, file_csf))) # add line in info_label.txt", "variable as object because there are 4 dimensions tracts = np.empty([len(fname_tract), 1], dtype=object)", "Save path of each tracts Only the tract must be in tracts_format in", "os.environ.get(\"SCT_DIR\", os.path.dirname(os.path.dirname(__file__))) # append path that contains scripts, to be able to load", "are 4 dimensions tracts = np.empty([len(fname_tract), 1], dtype=object) #Load each partial volumes of", "the niftii image created, ex: '3D_matrix.nii.gz' img = nib.Nifti1Image(np_matrix_3d, np.eye(4)) affine = img.get_affine()", "= get_tracts(folder_atlas) nb_tracts = len(tracts) # Get the sum of the tracts tracts_sum", "tract must be in tracts_format in the folder\"\"\" fname_tract = glob.glob(os.path.join(tracts_folder, \"*.nii.gz\")) #Initialise", "add line in info_label.txt text_label = '\\n'+str(nb_tracts)+', CSF, '+file_csf io.open(os.path.join(folder_atlas, file_label) 'a+b').write(text_label) def", "is a 3D numpy 
ndarray # output_image is the name of the niftii", "ndarray Save path of each tracts Only the tract must be in tracts_format", "tmp.WM_all_bin_dil.nii.gz') # subtract WM mask to obtain CSF mask sct.run('fslmaths tmp.WM_all_bin_dil -sub tmp.WM_all", "Get the sum of the tracts tracts_sum = add_tracts(tracts, tracts_to_sum_index) # Save sum", "from the atlas' folder tracts = get_tracts(folder_atlas) nb_tracts = len(tracts) # Get the", "os, glob import numpy as np import nibabel as nib path_sct = os.environ.get(\"SCT_DIR\",", "# Save 3d numpy matrix to niftii image # np_matrix_3d is a 3D", "\"WMtract__csf.nii.gz\" file_gm = \"WMtract__gm.nii.gz\" file_label = 'info_label.txt' def main(): # Extract the tracts", "os.path.join(folder_atlas, \"WMtract__00.nii.gz\")) # binarize it sct.run('fslmaths tmp.WM_all.nii.gz -thr 0.5 -bin tmp.WM_all_bin.nii.gz') # dilate", "fname_tract = glob.glob(os.path.join(tracts_folder, \"*.nii.gz\")) #Initialise tracts variable as object because there are 4", "dtype=object) #Load each partial volumes of each tracts for label in range(0, len(fname_tract)):", "sct # parameters tracts_to_sum_index = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 folder_atlas = os.path.join(\"WMtracts_outputs\", \"final_results\") file_csf = \"WMtract__csf.nii.gz\"", "'a+b').write(text_label) def get_tracts(tracts_folder): \"\"\"Loads tracts in an atlas folder and converts them from", "0.5 -bin tmp.WM_all_bin.nii.gz') # dilate it sct.run('fslmaths tmp.WM_all_bin.nii.gz -kernel boxv 5x5x1 -dilM tmp.WM_all_bin_dil.nii.gz')", "def add_tracts(tracts, tracts_to_sum_index): tracts_sum = np.empty((tracts[0, 0]).shape) for i in tracts_to_sum_index: tracts_sum =", "tmp.WM_all_bin.nii.gz -kernel boxv 5x5x1 -dilM tmp.WM_all_bin_dil.nii.gz') # subtract WM mask to obtain CSF", "file_label) 'a+b').write(text_label) def get_tracts(tracts_folder): \"\"\"Loads tracts in an atlas folder and converts them", "sys, io, os, glob import numpy as np import nibabel as nib path_sct", "the tracts tracts_sum = add_tracts(tracts, tracts_to_sum_index) # Save sum of the tracts to", "= 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 folder_atlas = os.path.join(\"WMtracts_outputs\", \"final_results\") file_csf = \"WMtract__csf.nii.gz\" file_gm = \"WMtract__gm.nii.gz\" file_label", "tracts_to_sum_index) # Save sum of the tracts to niftii save_3D_nparray_nifti(tracts_sum, 'tmp.WM_all.nii.gz', os.path.join(folder_atlas, \"WMtract__00.nii.gz\"))", "= add_tracts(tracts, tracts_to_sum_index) # Save sum of the tracts to niftii save_3D_nparray_nifti(tracts_sum, 'tmp.WM_all.nii.gz',", "of the tracts to niftii save_3D_nparray_nifti(tracts_sum, 'tmp.WM_all.nii.gz', os.path.join(folder_atlas, \"WMtract__00.nii.gz\")) # binarize it sct.run('fslmaths", "file_csf))) # add line in info_label.txt text_label = '\\n'+str(nb_tracts)+', CSF, '+file_csf io.open(os.path.join(folder_atlas, file_label)", "def get_tracts(tracts_folder): \"\"\"Loads tracts in an atlas folder and converts them from .nii.gz", "tracts = get_tracts(folder_atlas) nb_tracts = len(tracts) # Get the sum of the tracts", "\"final_results\") file_csf = \"WMtract__csf.nii.gz\" file_gm = \"WMtract__gm.nii.gz\" file_label = 'info_label.txt' def main(): #", "scripts, to be able to load modules sys.path.append(os.path.join(path_sct, 'scripts')) import sct_utils as sct", "numpy as np import nibabel as nib path_sct = os.environ.get(\"SCT_DIR\", os.path.dirname(os.path.dirname(__file__))) # append", "TODO: get GM # 
TODO: add tract corresponding to the undefined values in", "= '\\n'+str(nb_tracts)+', CSF, '+file_csf io.open(os.path.join(folder_atlas, file_label) 'a+b').write(text_label) def get_tracts(tracts_folder): \"\"\"Loads tracts in an", "TODO: add tract corresponding to the undefined values in WM atlas import sys,", "the atlas' folder tracts = get_tracts(folder_atlas) nb_tracts = len(tracts) # Get the sum", "nb_tracts = len(tracts) # Get the sum of the tracts tracts_sum = add_tracts(tracts,", "import numpy as np import nibabel as nib path_sct = os.environ.get(\"SCT_DIR\", os.path.dirname(os.path.dirname(__file__))) #", "to niftii save_3D_nparray_nifti(tracts_sum, 'tmp.WM_all.nii.gz', os.path.join(folder_atlas, \"WMtract__00.nii.gz\")) # binarize it sct.run('fslmaths tmp.WM_all.nii.gz -thr 0.5", "each partial volumes of each tracts for label in range(0, len(fname_tract)): tracts[label, 0]", "niftii image # np_matrix_3d is a 3D numpy ndarray # output_image is the" ]
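
# --- Illustrative sketch (not part of the script above) ---
# A minimal, self-contained example of the partial-volume summation that
# add_tracts() performs, using tiny synthetic arrays instead of real atlas
# tracts. The names demo_tracts/demo_sum are hypothetical.
import numpy as np

demo_tracts = np.empty([3, 1], dtype=object)
for k in range(3):
    demo_tracts[k, 0] = np.full((2, 2, 1), 0.1 * (k + 1))

demo_sum = np.zeros(demo_tracts[0, 0].shape)
for k in (0, 1, 2):
    demo_sum = demo_sum + demo_tracts[k, 0]

# every voxel now holds 0.1 + 0.2 + 0.3 = 0.6
assert np.allclose(demo_sum, 0.6)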
[ "as a compiled binary, for any purpose, commercial or non-commercial, and by any", "of our heirs and successors. We intend this dedication to be an overt", "the public domain. We make this dedication for the benefit of the public", "domain. We make this dedication for the benefit of the public at large", "code form or as a compiled binary, for any purpose, commercial or non-commercial,", "We intend this dedication to be an overt act of relinquishment in perpetuity", "dedication to be an overt act of relinquishment in perpetuity of all present", "= RandomForestClassifier() rfc.fit(X_train, y_train) dump(rfc, 'model/diabetes_predictor.joblib') def main(): train_model() if __name__ == '__main__':", "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED", "to the detriment of our heirs and successors. We intend this dedication to", "sell, or distribute this software, either in source code form or as a", "by any means. In jurisdictions that recognize copyright laws, the author or authors", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <https://unlicense.org>", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF", "future rights to this software under copyright law. THE SOFTWARE IS PROVIDED \"AS", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH", "copyright laws, the author or authors of this software dedicate any and all", "please refer to <https://unlicense.org> ''' import pandas as pd from sklearn.ensemble import RandomForestClassifier", "copy, modify, publish, use, compile, sell, or distribute this software, either in source", "is free to copy, modify, publish, use, compile, sell, or distribute this software,", "We make this dedication for the benefit of the public at large and", "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "means. In jurisdictions that recognize copyright laws, the author or authors of this", "THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "large and to the detriment of our heirs and successors. We intend this", "free and unencumbered software released into the public domain. Anyone is free to", "OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <https://unlicense.org> '''", "software to the public domain. We make this dedication for the benefit of", "any and all copyright interest in the software to the public domain. We", "an overt act of relinquishment in perpetuity of all present and future rights", "pd from sklearn.ensemble import RandomForestClassifier from joblib import dump def train_model(): data =", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT", "public domain. We make this dedication for the benefit of the public at", "a compiled binary, for any purpose, commercial or non-commercial, and by any means.", "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "or non-commercial, and by any means. 
In jurisdictions that recognize copyright laws, the", "OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "more information, please refer to <https://unlicense.org> ''' import pandas as pd from sklearn.ensemble", "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "this software under copyright law. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "make this dedication for the benefit of the public at large and to", "author or authors of this software dedicate any and all copyright interest in", "compiled binary, for any purpose, commercial or non-commercial, and by any means. In", "this software, either in source code form or as a compiled binary, for", "at large and to the detriment of our heirs and successors. We intend", "domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute", "released into the public domain. Anyone is free to copy, modify, publish, use,", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information,", "binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions", "in source code form or as a compiled binary, for any purpose, commercial", "laws, the author or authors of this software dedicate any and all copyright", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "the benefit of the public at large and to the detriment of our", "our heirs and successors. We intend this dedication to be an overt act", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "this dedication for the benefit of the public at large and to the", "authors of this software dedicate any and all copyright interest in the software", "and all copyright interest in the software to the public domain. We make", "pandas as pd from sklearn.ensemble import RandomForestClassifier from joblib import dump def train_model():", "and successors. We intend this dedication to be an overt act of relinquishment", "of the public at large and to the detriment of our heirs and", "rights to this software under copyright law. THE SOFTWARE IS PROVIDED \"AS IS\",", "or distribute this software, either in source code form or as a compiled", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE", "to the public domain. We make this dedication for the benefit of the", "AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,", "For more information, please refer to <https://unlicense.org> ''' import pandas as pd from", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE", "copyright law. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "SOFTWARE. For more information, please refer to <https://unlicense.org> ''' import pandas as pd", "refer to <https://unlicense.org> ''' import pandas as pd from sklearn.ensemble import RandomForestClassifier from", "the public at large and to the detriment of our heirs and successors.", "unencumbered software released into the public domain. Anyone is free to copy, modify,", "and unencumbered software released into the public domain. Anyone is free to copy,", "the public domain. Anyone is free to copy, modify, publish, use, compile, sell,", "THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer", "public at large and to the detriment of our heirs and successors. 
We", "free to copy, modify, publish, use, compile, sell, or distribute this software, either", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "distribute this software, either in source code form or as a compiled binary,", "any means. In jurisdictions that recognize copyright laws, the author or authors of", "= data.drop(columns='Outcome') y_train = data['Outcome'] rfc = RandomForestClassifier() rfc.fit(X_train, y_train) dump(rfc, 'model/diabetes_predictor.joblib') def", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "the author or authors of this software dedicate any and all copyright interest", "and to the detriment of our heirs and successors. We intend this dedication", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT", "recognize copyright laws, the author or authors of this software dedicate any and", "WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF", "compile, sell, or distribute this software, either in source code form or as", "that recognize copyright laws, the author or authors of this software dedicate any", "detriment of our heirs and successors. We intend this dedication to be an", "X_train = data.drop(columns='Outcome') y_train = data['Outcome'] rfc = RandomForestClassifier() rfc.fit(X_train, y_train) dump(rfc, 'model/diabetes_predictor.joblib')", "the detriment of our heirs and successors. We intend this dedication to be", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION", "dedicate any and all copyright interest in the software to the public domain.", "RandomForestClassifier from joblib import dump def train_model(): data = pd.read_csv('data/diabetes.csv') X_train = data.drop(columns='Outcome')", "from joblib import dump def train_model(): data = pd.read_csv('data/diabetes.csv') X_train = data.drop(columns='Outcome') y_train", "overt act of relinquishment in perpetuity of all present and future rights to", "is free and unencumbered software released into the public domain. Anyone is free", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "this software dedicate any and all copyright interest in the software to the", "NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "of all present and future rights to this software under copyright law. THE", "this dedication to be an overt act of relinquishment in perpetuity of all", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For", "import pandas as pd from sklearn.ensemble import RandomForestClassifier from joblib import dump def", "TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "IN THE SOFTWARE. 
For more information, please refer to <https://unlicense.org> ''' import pandas", "Anyone is free to copy, modify, publish, use, compile, sell, or distribute this", "or authors of this software dedicate any and all copyright interest in the", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "to copy, modify, publish, use, compile, sell, or distribute this software, either in", "law. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "''' This is free and unencumbered software released into the public domain. Anyone", "interest in the software to the public domain. We make this dedication for", "to <https://unlicense.org> ''' import pandas as pd from sklearn.ensemble import RandomForestClassifier from joblib", "KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "information, please refer to <https://unlicense.org> ''' import pandas as pd from sklearn.ensemble import", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT", "OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please", "= data['Outcome'] rfc = RandomForestClassifier() rfc.fit(X_train, y_train) dump(rfc, 'model/diabetes_predictor.joblib') def main(): train_model() if", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "and by any means. In jurisdictions that recognize copyright laws, the author or", "source code form or as a compiled binary, for any purpose, commercial or", "to be an overt act of relinquishment in perpetuity of all present and", "joblib import dump def train_model(): data = pd.read_csv('data/diabetes.csv') X_train = data.drop(columns='Outcome') y_train =", "benefit of the public at large and to the detriment of our heirs", "into the public domain. Anyone is free to copy, modify, publish, use, compile,", "all present and future rights to this software under copyright law. THE SOFTWARE", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING", "ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM,", "under copyright law. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "This is free and unencumbered software released into the public domain. Anyone is", "OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "import dump def train_model(): data = pd.read_csv('data/diabetes.csv') X_train = data.drop(columns='Outcome') y_train = data['Outcome']", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "DEALINGS IN THE SOFTWARE. 
For more information, please refer to <https://unlicense.org> ''' import", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION", "data = pd.read_csv('data/diabetes.csv') X_train = data.drop(columns='Outcome') y_train = data['Outcome'] rfc = RandomForestClassifier() rfc.fit(X_train,", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS", "pd.read_csv('data/diabetes.csv') X_train = data.drop(columns='Outcome') y_train = data['Outcome'] rfc = RandomForestClassifier() rfc.fit(X_train, y_train) dump(rfc,", "y_train = data['Outcome'] rfc = RandomForestClassifier() rfc.fit(X_train, y_train) dump(rfc, 'model/diabetes_predictor.joblib') def main(): train_model()", "import RandomForestClassifier from joblib import dump def train_model(): data = pd.read_csv('data/diabetes.csv') X_train =", "data['Outcome'] rfc = RandomForestClassifier() rfc.fit(X_train, y_train) dump(rfc, 'model/diabetes_predictor.joblib') def main(): train_model() if __name__", "from sklearn.ensemble import RandomForestClassifier from joblib import dump def train_model(): data = pd.read_csv('data/diabetes.csv')", "in the software to the public domain. We make this dedication for the", "purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright", "public domain. Anyone is free to copy, modify, publish, use, compile, sell, or", "sklearn.ensemble import RandomForestClassifier from joblib import dump def train_model(): data = pd.read_csv('data/diabetes.csv') X_train", "def train_model(): data = pd.read_csv('data/diabetes.csv') X_train = data.drop(columns='Outcome') y_train = data['Outcome'] rfc =", "be an overt act of relinquishment in perpetuity of all present and future", "jurisdictions that recognize copyright laws, the author or authors of this software dedicate", "USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to", "THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN", "all copyright interest in the software to the public domain. We make this", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "copyright interest in the software to the public domain. We make this dedication", "form or as a compiled binary, for any purpose, commercial or non-commercial, and", "any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize", "intend this dedication to be an overt act of relinquishment in perpetuity of", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "dedication for the benefit of the public at large and to the detriment", "software released into the public domain. Anyone is free to copy, modify, publish,", "publish, use, compile, sell, or distribute this software, either in source code form", "successors. We intend this dedication to be an overt act of relinquishment in", "of relinquishment in perpetuity of all present and future rights to this software", "for the benefit of the public at large and to the detriment of", "PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY", "IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "''' import pandas as pd from sklearn.ensemble import RandomForestClassifier from joblib import dump", "rfc = RandomForestClassifier() rfc.fit(X_train, y_train) dump(rfc, 'model/diabetes_predictor.joblib') def main(): train_model() if __name__ ==", "data.drop(columns='Outcome') y_train = data['Outcome'] rfc = RandomForestClassifier() rfc.fit(X_train, y_train) dump(rfc, 'model/diabetes_predictor.joblib') def main():", "THE SOFTWARE. For more information, please refer to <https://unlicense.org> ''' import pandas as", "software, either in source code form or as a compiled binary, for any", "RandomForestClassifier() rfc.fit(X_train, y_train) dump(rfc, 'model/diabetes_predictor.joblib') def main(): train_model() if __name__ == '__main__': main()", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE", "OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "software dedicate any and all copyright interest in the software to the public", "software under copyright law. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR", "<https://unlicense.org> ''' import pandas as pd from sklearn.ensemble import RandomForestClassifier from joblib import", "either in source code form or as a compiled binary, for any purpose,", "= pd.read_csv('data/diabetes.csv') X_train = data.drop(columns='Outcome') y_train = data['Outcome'] rfc = RandomForestClassifier() rfc.fit(X_train, y_train)", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "In jurisdictions that recognize copyright laws, the author or authors of this software", "in perpetuity of all present and future rights to this software under copyright", "dump def train_model(): data = pd.read_csv('data/diabetes.csv') X_train = data.drop(columns='Outcome') y_train = data['Outcome'] rfc", "commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws,", "present and future rights to this software under copyright law. THE SOFTWARE IS", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR", "use, compile, sell, or distribute this software, either in source code form or", "act of relinquishment in perpetuity of all present and future rights to this", "modify, publish, use, compile, sell, or distribute this software, either in source code", "of this software dedicate any and all copyright interest in the software to", "ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "to this software under copyright law. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "or as a compiled binary, for any purpose, commercial or non-commercial, and by", "perpetuity of all present and future rights to this software under copyright law.", "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "train_model(): data = pd.read_csv('data/diabetes.csv') X_train = data.drop(columns='Outcome') y_train = data['Outcome'] rfc = RandomForestClassifier()", "for any purpose, commercial or non-commercial, and by any means. 
In jurisdictions that", "SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN", "heirs and successors. We intend this dedication to be an overt act of", "the software to the public domain. We make this dedication for the benefit", "relinquishment in perpetuity of all present and future rights to this software under", "WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", "as pd from sklearn.ensemble import RandomForestClassifier from joblib import dump def train_model(): data" ]
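
# --- Illustrative sketch (not part of the script above) ---
# A minimal example of how the model trained and dumped above could be loaded
# back and used. It assumes the same 'data/diabetes.csv' layout (an 'Outcome'
# column plus feature columns) and the same joblib path as the script above.
import pandas as pd
from joblib import load


def predict_first_row():
    model = load('model/diabetes_predictor.joblib')
    features = pd.read_csv('data/diabetes.csv').drop(columns='Outcome')
    # predict the outcome for the first row of the training file
    return model.predict(features.head(1))[0]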
[ "length = 1 step = 0 rotations = 0 first, second = False,", "data += 1 if data == number: first = True # Sum of", "+= 1 if data == number: first = True # Sum of all", "all adjecant squares adjecant_sum = 0 for d in directions: x_1 = x", "1 x, y = (0, 0) direction = 0 directions = [(1, 0),", "= 1 second_result = 0 length = 1 step = 0 rotations =", "= 0 rotations += 1 # Every two rotations length of the side", "+= 1 # If number of steps equals length of the current trajectory,", "y = (0, 0) direction = 0 directions = [(1, 0), (0, 1),", "current trajectory, then rotate if step >= length: direction = (direction + 1)", "steps equals length of the current trajectory, then rotate if step >= length:", "second): # Add a step d = directions[direction] x += d[0] y +=", "0 if not first: data += 1 if data == number: first =", "# Sum of all adjecant squares adjecant_sum = 0 for d in directions:", "= x + d[0] y_1 = y + d[1] adjecant_sum += values[(x_1, y_1)]", "length of the current trajectory, then rotate if step >= length: direction =", "# If number of steps equals length of the current trajectory, then rotate", "(-1, 1)] data = 1 second_result = 0 length = 1 step =", "if data == number: first = True # Sum of all adjecant squares", "= 1 step = 0 rotations = 0 first, second = False, False", "rotate if step >= length: direction = (direction + 1) % 4 step", "1) % 4 step = 0 rotations += 1 # Every two rotations", "= 0 if not first: data += 1 if data == number: first", "d = directions[direction] x += d[0] y += d[1] step += 1 #", "and adjecant_sum > number: second_result = adjecant_sum second = True print('Part One: {}\\nPart", "values[(0, 0)] = 1 x, y = (0, 0) direction = 0 directions", "d[0] y_1 = y + d[1] adjecant_sum += values[(x_1, y_1)] values[(x, y)] =", "1 # If number of steps equals length of the current trajectory, then", "0)] = 1 x, y = (0, 0) direction = 0 directions =", "rotations length of the side is 1 longer if rotations == 2: length", "side is 1 longer if rotations == 2: length += 1 rotations =", "second = False, False while not (first and second): # Add a step", "the side is 1 longer if rotations == 2: length += 1 rotations", "longer if rotations == 2: length += 1 rotations = 0 if not", "step >= length: direction = (direction + 1) % 4 step = 0", "1 # Every two rotations length of the side is 1 longer if", "+= values[(x_1, y_1)] values[(x, y)] = adjecant_sum if not second and adjecant_sum >", "1 second_result = 0 length = 1 step = 0 rotations = 0", "1 rotations = 0 if not first: data += 1 if data ==", "4 step = 0 rotations += 1 # Every two rotations length of", "== number: first = True # Sum of all adjecant squares adjecant_sum =", "import defaultdict number = int(input()) values = defaultdict(int) values[(0, 0)] = 1 x,", "defaultdict number = int(input()) values = defaultdict(int) values[(0, 0)] = 1 x, y", "= 0 rotations = 0 first, second = False, False while not (first", "(0, -1), (1, 1), (-1, -1), (1, -1), (-1, 1)] data = 1", "not first: data += 1 if data == number: first = True #", "+= d[0] y += d[1] step += 1 # If number of steps", "Sum of all adjecant squares adjecant_sum = 0 for d in directions: x_1", "of all adjecant squares adjecant_sum = 0 for d in directions: x_1 =", "y + d[1] adjecant_sum += values[(x_1, y_1)] values[(x, y)] = adjecant_sum if not", "-1), (1, 1), (-1, -1), (1, -1), (-1, 1)] data = 1 second_result", "for d in directions: x_1 = x + d[0] y_1 = y +", "0 directions = [(1, 0), (0, 1), (-1, 0), (0, -1), (1, 1),", "d[1] step += 1 # If number of steps equals length of 
the", "Add a step d = directions[direction] x += d[0] y += d[1] step", "x, y = (0, 0) direction = 0 directions = [(1, 0), (0,", "not (first and second): # Add a step d = directions[direction] x +=", "defaultdict(int) values[(0, 0)] = 1 x, y = (0, 0) direction = 0", "If number of steps equals length of the current trajectory, then rotate if", "y)] = adjecant_sum if not second and adjecant_sum > number: second_result = adjecant_sum", "d[1] adjecant_sum += values[(x_1, y_1)] values[(x, y)] = adjecant_sum if not second and", "data == number: first = True # Sum of all adjecant squares adjecant_sum", "= adjecant_sum if not second and adjecant_sum > number: second_result = adjecant_sum second", "if rotations == 2: length += 1 rotations = 0 if not first:", "direction = 0 directions = [(1, 0), (0, 1), (-1, 0), (0, -1),", "squares adjecant_sum = 0 for d in directions: x_1 = x + d[0]", "= defaultdict(int) values[(0, 0)] = 1 x, y = (0, 0) direction =", "length of the side is 1 longer if rotations == 2: length +=", "second and adjecant_sum > number: second_result = adjecant_sum second = True print('Part One:", "rotations == 2: length += 1 rotations = 0 if not first: data", "True # Sum of all adjecant squares adjecant_sum = 0 for d in", "first: data += 1 if data == number: first = True # Sum", "(-1, 0), (0, -1), (1, 1), (-1, -1), (1, -1), (-1, 1)] data", "= 0 first, second = False, False while not (first and second): #", "# Every two rotations length of the side is 1 longer if rotations", "(1, 1), (-1, -1), (1, -1), (-1, 1)] data = 1 second_result =", "+= d[1] step += 1 # If number of steps equals length of", "number = int(input()) values = defaultdict(int) values[(0, 0)] = 1 x, y =", "values[(x_1, y_1)] values[(x, y)] = adjecant_sum if not second and adjecant_sum > number:", "= directions[direction] x += d[0] y += d[1] step += 1 # If", "1), (-1, 0), (0, -1), (1, 1), (-1, -1), (1, -1), (-1, 1)]", "adjecant_sum += values[(x_1, y_1)] values[(x, y)] = adjecant_sum if not second and adjecant_sum", "step = 0 rotations += 1 # Every two rotations length of the", "values[(x, y)] = adjecant_sum if not second and adjecant_sum > number: second_result =", "number: first = True # Sum of all adjecant squares adjecant_sum = 0", "= y + d[1] adjecant_sum += values[(x_1, y_1)] values[(x, y)] = adjecant_sum if", "data = 1 second_result = 0 length = 1 step = 0 rotations", "in directions: x_1 = x + d[0] y_1 = y + d[1] adjecant_sum", "> number: second_result = adjecant_sum second = True print('Part One: {}\\nPart Two: {}'.format(abs(x)", "from collections import defaultdict number = int(input()) values = defaultdict(int) values[(0, 0)] =", "(1, -1), (-1, 1)] data = 1 second_result = 0 length = 1", "then rotate if step >= length: direction = (direction + 1) % 4", "[(1, 0), (0, 1), (-1, 0), (0, -1), (1, 1), (-1, -1), (1,", "while not (first and second): # Add a step d = directions[direction] x", "y += d[1] step += 1 # If number of steps equals length", "equals length of the current trajectory, then rotate if step >= length: direction", "1 step = 0 rotations = 0 first, second = False, False while", "directions = [(1, 0), (0, 1), (-1, 0), (0, -1), (1, 1), (-1,", "length += 1 rotations = 0 if not first: data += 1 if", "= 0 for d in directions: x_1 = x + d[0] y_1 =", "= (0, 0) direction = 0 directions = [(1, 0), (0, 1), (-1,", "second_result = 0 length = 1 step = 0 rotations = 0 first,", "if not second and adjecant_sum > number: second_result = adjecant_sum second = True", "of the side is 1 longer if rotations == 2: length += 1", 
"step += 1 # If number of steps equals length of the current", "directions: x_1 = x + d[0] y_1 = y + d[1] adjecant_sum +=", "if step >= length: direction = (direction + 1) % 4 step =", "(direction + 1) % 4 step = 0 rotations += 1 # Every", "second_result = adjecant_sum second = True print('Part One: {}\\nPart Two: {}'.format(abs(x) + abs(y),", "1), (-1, -1), (1, -1), (-1, 1)] data = 1 second_result = 0", "0 length = 1 step = 0 rotations = 0 first, second =", "directions[direction] x += d[0] y += d[1] step += 1 # If number", "length: direction = (direction + 1) % 4 step = 0 rotations +=", "0 first, second = False, False while not (first and second): # Add", "Every two rotations length of the side is 1 longer if rotations ==", "two rotations length of the side is 1 longer if rotations == 2:", "= 0 directions = [(1, 0), (0, 1), (-1, 0), (0, -1), (1,", "% 4 step = 0 rotations += 1 # Every two rotations length", "+ d[0] y_1 = y + d[1] adjecant_sum += values[(x_1, y_1)] values[(x, y)]", "direction = (direction + 1) % 4 step = 0 rotations += 1", "= 1 x, y = (0, 0) direction = 0 directions = [(1,", "+= 1 # Every two rotations length of the side is 1 longer", "0 rotations = 0 first, second = False, False while not (first and", "trajectory, then rotate if step >= length: direction = (direction + 1) %", "number: second_result = adjecant_sum second = True print('Part One: {}\\nPart Two: {}'.format(abs(x) +", "= [(1, 0), (0, 1), (-1, 0), (0, -1), (1, 1), (-1, -1),", "+ d[1] adjecant_sum += values[(x_1, y_1)] values[(x, y)] = adjecant_sum if not second", "not second and adjecant_sum > number: second_result = adjecant_sum second = True print('Part", "= False, False while not (first and second): # Add a step d", "step = 0 rotations = 0 first, second = False, False while not", "rotations = 0 if not first: data += 1 if data == number:", "(0, 1), (-1, 0), (0, -1), (1, 1), (-1, -1), (1, -1), (-1,", "1 if data == number: first = True # Sum of all adjecant", "-1), (1, -1), (-1, 1)] data = 1 second_result = 0 length =", "False, False while not (first and second): # Add a step d =", "of steps equals length of the current trajectory, then rotate if step >=", "x += d[0] y += d[1] step += 1 # If number of", "+= 1 rotations = 0 if not first: data += 1 if data", "0 rotations += 1 # Every two rotations length of the side is", "# Add a step d = directions[direction] x += d[0] y += d[1]", "d in directions: x_1 = x + d[0] y_1 = y + d[1]", "rotations += 1 # Every two rotations length of the side is 1", "<gh_stars>0 from collections import defaultdict number = int(input()) values = defaultdict(int) values[(0, 0)]", "0), (0, -1), (1, 1), (-1, -1), (1, -1), (-1, 1)] data =", "0 for d in directions: x_1 = x + d[0] y_1 = y", "False while not (first and second): # Add a step d = directions[direction]", "the current trajectory, then rotate if step >= length: direction = (direction +", "1 longer if rotations == 2: length += 1 rotations = 0 if", "a step d = directions[direction] x += d[0] y += d[1] step +=", "adjecant_sum = 0 for d in directions: x_1 = x + d[0] y_1", "= int(input()) values = defaultdict(int) values[(0, 0)] = 1 x, y = (0,", "-1), (-1, 1)] data = 1 second_result = 0 length = 1 step", "of the current trajectory, then rotate if step >= length: direction = (direction", "int(input()) values = defaultdict(int) values[(0, 0)] = 1 x, y = (0, 0)", "is 1 longer if rotations == 2: length += 1 rotations = 0", "2: length += 1 rotations = 0 if not first: data += 1", "= adjecant_sum second = True print('Part One: {}\\nPart Two: 
{}'.format(abs(x) + abs(y), second_result))", "and second): # Add a step d = directions[direction] x += d[0] y", "number of steps equals length of the current trajectory, then rotate if step", "= 0 length = 1 step = 0 rotations = 0 first, second", "(first and second): # Add a step d = directions[direction] x += d[0]", "first = True # Sum of all adjecant squares adjecant_sum = 0 for", "step d = directions[direction] x += d[0] y += d[1] step += 1", "first, second = False, False while not (first and second): # Add a", "(-1, -1), (1, -1), (-1, 1)] data = 1 second_result = 0 length", "rotations = 0 first, second = False, False while not (first and second):", "+ 1) % 4 step = 0 rotations += 1 # Every two", "0) direction = 0 directions = [(1, 0), (0, 1), (-1, 0), (0,", ">= length: direction = (direction + 1) % 4 step = 0 rotations", "values = defaultdict(int) values[(0, 0)] = 1 x, y = (0, 0) direction", "y_1)] values[(x, y)] = adjecant_sum if not second and adjecant_sum > number: second_result", "collections import defaultdict number = int(input()) values = defaultdict(int) values[(0, 0)] = 1", "d[0] y += d[1] step += 1 # If number of steps equals", "adjecant_sum > number: second_result = adjecant_sum second = True print('Part One: {}\\nPart Two:", "adjecant squares adjecant_sum = 0 for d in directions: x_1 = x +", "== 2: length += 1 rotations = 0 if not first: data +=", "adjecant_sum if not second and adjecant_sum > number: second_result = adjecant_sum second =", "if not first: data += 1 if data == number: first = True", "= (direction + 1) % 4 step = 0 rotations += 1 #", "x + d[0] y_1 = y + d[1] adjecant_sum += values[(x_1, y_1)] values[(x,", "0), (0, 1), (-1, 0), (0, -1), (1, 1), (-1, -1), (1, -1),", "x_1 = x + d[0] y_1 = y + d[1] adjecant_sum += values[(x_1,", "= True # Sum of all adjecant squares adjecant_sum = 0 for d", "1)] data = 1 second_result = 0 length = 1 step = 0", "y_1 = y + d[1] adjecant_sum += values[(x_1, y_1)] values[(x, y)] = adjecant_sum", "(0, 0) direction = 0 directions = [(1, 0), (0, 1), (-1, 0)," ]
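
# --- Illustrative sketch (not part of the script above) ---
# A standalone helper that repeats only the Part One walk from the solution
# above (same direction table and side-length bookkeeping), so it can be
# sanity-checked without reading stdin. The expected distances (1 -> 0,
# 12 -> 3, 23 -> 2, 1024 -> 31) are the well-known Advent of Code examples.
def spiral_distance(number):
    x = y = 0
    direction = 0
    directions = [(1, 0), (0, 1), (-1, 0), (0, -1)]
    data = 1
    length = 1
    step = 0
    rotations = 0
    while data < number:
        dx, dy = directions[direction]
        x += dx
        y += dy
        step += 1
        if step >= length:
            direction = (direction + 1) % 4
            step = 0
            rotations += 1
            if rotations == 2:
                length += 1
                rotations = 0
        data += 1
    return abs(x) + abs(y)


assert spiral_distance(1) == 0
assert spiral_distance(12) == 3
assert spiral_distance(23) == 2
assert spiral_distance(1024) == 31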
[ "with open(\"keys.txt\",'a') as w: w.write(\"-------------------------\\n\") for e in users: if e in [\"\",\"", "in pro: if ' Key Content : ' in i: passwrd=i break passwrd=passwrd[29:]", "w: w.write(\"-------------------------\\n\") for e in users: if e in [\"\",\" \"]: continue e=(\"\\\"\"+e+\"\\\"\")", "as w: w.write(\"-------------------------\\n\") for e in users: if e in [\"\",\" \"]: continue", "os,platform,subprocess from sys import exit def main(): startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW", "main(): startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW txt=subprocess.check_output('netsh wlan show profiles', shell=False, startupinfo=startupinfo).decode()", "pro=subprocess.check_output(\"netsh wlan show profile {} key=clear\".format(e), shell=False, startupinfo=startupinfo).decode('windows-1252') pro=pro.splitlines() passwrd=\"\" for i in", "[\"\",\" \"]: continue e=(\"\\\"\"+e+\"\\\"\") pro=subprocess.check_output(\"netsh wlan show profile {} key=clear\".format(e), shell=False, startupinfo=startupinfo).decode('windows-1252') pro=pro.splitlines()", "import exit def main(): startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW txt=subprocess.check_output('netsh wlan show", "show profile {} key=clear\".format(e), shell=False, startupinfo=startupinfo).decode('windows-1252') pro=pro.splitlines() passwrd=\"\" for i in pro: if", "startupinfo=startupinfo).decode('windows-1252') pro=pro.splitlines() passwrd=\"\" for i in pro: if ' Key Content : '", "subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW txt=subprocess.check_output('netsh wlan show profiles', shell=False, startupinfo=startupinfo).decode() txt=txt.splitlines() users=[] for", "txt[9:]: try: users.append(i[27:]) except: pass with open(\"keys.txt\",'a') as w: w.write(\"-------------------------\\n\") for e in", "pass with open(\"keys.txt\",'a') as w: w.write(\"-------------------------\\n\") for e in users: if e in", "<reponame>Lets7512/Saved_WiFi_Passwords_Scraper_On_Windows import os,platform,subprocess from sys import exit def main(): startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags", "|= subprocess.STARTF_USESHOWWINDOW txt=subprocess.check_output('netsh wlan show profiles', shell=False, startupinfo=startupinfo).decode() txt=txt.splitlines() users=[] for i in", "\"]: continue e=(\"\\\"\"+e+\"\\\"\") pro=subprocess.check_output(\"netsh wlan show profile {} key=clear\".format(e), shell=False, startupinfo=startupinfo).decode('windows-1252') pro=pro.splitlines() passwrd=\"\"", "e=(\"\\\"\"+e+\"\\\"\") pro=subprocess.check_output(\"netsh wlan show profile {} key=clear\".format(e), shell=False, startupinfo=startupinfo).decode('windows-1252') pro=pro.splitlines() passwrd=\"\" for i", "{} key=clear\".format(e), shell=False, startupinfo=startupinfo).decode('windows-1252') pro=pro.splitlines() passwrd=\"\" for i in pro: if ' Key", "open(\"keys.txt\",'a') as w: w.write(\"-------------------------\\n\") for e in users: if e in [\"\",\" \"]:", "profile {} key=clear\".format(e), shell=False, startupinfo=startupinfo).decode('windows-1252') pro=pro.splitlines() passwrd=\"\" for i in pro: if '", ": ' in i: passwrd=i break passwrd=passwrd[29:] to_w=e+' : '+passwrd+\"\\n\" w.write(to_w) if __name__", "wlan show profiles', shell=False, startupinfo=startupinfo).decode() txt=txt.splitlines() users=[] for i in txt[9:]: try: users.append(i[27:])", "= subprocess.STARTUPINFO() 
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW txt=subprocess.check_output('netsh wlan show profiles', shell=False, startupinfo=startupinfo).decode() txt=txt.splitlines() users=[]", "i: passwrd=i break passwrd=passwrd[29:] to_w=e+' : '+passwrd+\"\\n\" w.write(to_w) if __name__ == \"__main__\": main()", "txt=subprocess.check_output('netsh wlan show profiles', shell=False, startupinfo=startupinfo).decode() txt=txt.splitlines() users=[] for i in txt[9:]: try:", "if e in [\"\",\" \"]: continue e=(\"\\\"\"+e+\"\\\"\") pro=subprocess.check_output(\"netsh wlan show profile {} key=clear\".format(e),", "users=[] for i in txt[9:]: try: users.append(i[27:]) except: pass with open(\"keys.txt\",'a') as w:", "in [\"\",\" \"]: continue e=(\"\\\"\"+e+\"\\\"\") pro=subprocess.check_output(\"netsh wlan show profile {} key=clear\".format(e), shell=False, startupinfo=startupinfo).decode('windows-1252')", "Content : ' in i: passwrd=i break passwrd=passwrd[29:] to_w=e+' : '+passwrd+\"\\n\" w.write(to_w) if", "sys import exit def main(): startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW txt=subprocess.check_output('netsh wlan", "users: if e in [\"\",\" \"]: continue e=(\"\\\"\"+e+\"\\\"\") pro=subprocess.check_output(\"netsh wlan show profile {}", "i in txt[9:]: try: users.append(i[27:]) except: pass with open(\"keys.txt\",'a') as w: w.write(\"-------------------------\\n\") for", "for i in txt[9:]: try: users.append(i[27:]) except: pass with open(\"keys.txt\",'a') as w: w.write(\"-------------------------\\n\")", "passwrd=i break passwrd=passwrd[29:] to_w=e+' : '+passwrd+\"\\n\" w.write(to_w) if __name__ == \"__main__\": main() exit()", "in i: passwrd=i break passwrd=passwrd[29:] to_w=e+' : '+passwrd+\"\\n\" w.write(to_w) if __name__ == \"__main__\":", "passwrd=\"\" for i in pro: if ' Key Content : ' in i:", "show profiles', shell=False, startupinfo=startupinfo).decode() txt=txt.splitlines() users=[] for i in txt[9:]: try: users.append(i[27:]) except:", "try: users.append(i[27:]) except: pass with open(\"keys.txt\",'a') as w: w.write(\"-------------------------\\n\") for e in users:", "e in users: if e in [\"\",\" \"]: continue e=(\"\\\"\"+e+\"\\\"\") pro=subprocess.check_output(\"netsh wlan show", "w.write(\"-------------------------\\n\") for e in users: if e in [\"\",\" \"]: continue e=(\"\\\"\"+e+\"\\\"\") pro=subprocess.check_output(\"netsh", "key=clear\".format(e), shell=False, startupinfo=startupinfo).decode('windows-1252') pro=pro.splitlines() passwrd=\"\" for i in pro: if ' Key Content", "pro=pro.splitlines() passwrd=\"\" for i in pro: if ' Key Content : ' in", "i in pro: if ' Key Content : ' in i: passwrd=i break", "Key Content : ' in i: passwrd=i break passwrd=passwrd[29:] to_w=e+' : '+passwrd+\"\\n\" w.write(to_w)", "startupinfo=startupinfo).decode() txt=txt.splitlines() users=[] for i in txt[9:]: try: users.append(i[27:]) except: pass with open(\"keys.txt\",'a')", "startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW txt=subprocess.check_output('netsh wlan show profiles', shell=False, startupinfo=startupinfo).decode() txt=txt.splitlines()", "subprocess.STARTF_USESHOWWINDOW txt=subprocess.check_output('netsh wlan show profiles', shell=False, startupinfo=startupinfo).decode() txt=txt.splitlines() users=[] for i in txt[9:]:", "wlan show profile {} key=clear\".format(e), shell=False, startupinfo=startupinfo).decode('windows-1252') pro=pro.splitlines() passwrd=\"\" for i in pro:", "except: pass with 
open(\"keys.txt\",'a') as w: w.write(\"-------------------------\\n\") for e in users: if e", "profiles', shell=False, startupinfo=startupinfo).decode() txt=txt.splitlines() users=[] for i in txt[9:]: try: users.append(i[27:]) except: pass", "exit def main(): startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW txt=subprocess.check_output('netsh wlan show profiles',", "shell=False, startupinfo=startupinfo).decode('windows-1252') pro=pro.splitlines() passwrd=\"\" for i in pro: if ' Key Content :", "shell=False, startupinfo=startupinfo).decode() txt=txt.splitlines() users=[] for i in txt[9:]: try: users.append(i[27:]) except: pass with", "users.append(i[27:]) except: pass with open(\"keys.txt\",'a') as w: w.write(\"-------------------------\\n\") for e in users: if", "in users: if e in [\"\",\" \"]: continue e=(\"\\\"\"+e+\"\\\"\") pro=subprocess.check_output(\"netsh wlan show profile", "from sys import exit def main(): startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW txt=subprocess.check_output('netsh", "import os,platform,subprocess from sys import exit def main(): startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |=", "if ' Key Content : ' in i: passwrd=i break passwrd=passwrd[29:] to_w=e+' :", "for e in users: if e in [\"\",\" \"]: continue e=(\"\\\"\"+e+\"\\\"\") pro=subprocess.check_output(\"netsh wlan", "' in i: passwrd=i break passwrd=passwrd[29:] to_w=e+' : '+passwrd+\"\\n\" w.write(to_w) if __name__ ==", "e in [\"\",\" \"]: continue e=(\"\\\"\"+e+\"\\\"\") pro=subprocess.check_output(\"netsh wlan show profile {} key=clear\".format(e), shell=False,", "txt=txt.splitlines() users=[] for i in txt[9:]: try: users.append(i[27:]) except: pass with open(\"keys.txt\",'a') as", "def main(): startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW txt=subprocess.check_output('netsh wlan show profiles', shell=False,", "in txt[9:]: try: users.append(i[27:]) except: pass with open(\"keys.txt\",'a') as w: w.write(\"-------------------------\\n\") for e", "for i in pro: if ' Key Content : ' in i: passwrd=i", "' Key Content : ' in i: passwrd=i break passwrd=passwrd[29:] to_w=e+' : '+passwrd+\"\\n\"", "continue e=(\"\\\"\"+e+\"\\\"\") pro=subprocess.check_output(\"netsh wlan show profile {} key=clear\".format(e), shell=False, startupinfo=startupinfo).decode('windows-1252') pro=pro.splitlines() passwrd=\"\" for", "pro: if ' Key Content : ' in i: passwrd=i break passwrd=passwrd[29:] to_w=e+'", "startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW txt=subprocess.check_output('netsh wlan show profiles', shell=False, startupinfo=startupinfo).decode() txt=txt.splitlines() users=[] for i" ]
[ "import setup setup( name='pyr', version='0.4.1', description='A nicer REPL for Python.', author='<NAME>', author_email='<EMAIL>', url='https://github.com/zain/pyr',", "from setuptools import setup setup( name='pyr', version='0.4.1', description='A nicer REPL for Python.', author='<NAME>',", "version='0.4.1', description='A nicer REPL for Python.', author='<NAME>', author_email='<EMAIL>', url='https://github.com/zain/pyr', packages=['pyr'], install_requires=['pygments'], scripts=['bin/pyr'], )", "setup setup( name='pyr', version='0.4.1', description='A nicer REPL for Python.', author='<NAME>', author_email='<EMAIL>', url='https://github.com/zain/pyr', packages=['pyr'],", "setup( name='pyr', version='0.4.1', description='A nicer REPL for Python.', author='<NAME>', author_email='<EMAIL>', url='https://github.com/zain/pyr', packages=['pyr'], install_requires=['pygments'],", "python from setuptools import setup setup( name='pyr', version='0.4.1', description='A nicer REPL for Python.',", "#!/usr/bin/env python from setuptools import setup setup( name='pyr', version='0.4.1', description='A nicer REPL for", "setuptools import setup setup( name='pyr', version='0.4.1', description='A nicer REPL for Python.', author='<NAME>', author_email='<EMAIL>',", "name='pyr', version='0.4.1', description='A nicer REPL for Python.', author='<NAME>', author_email='<EMAIL>', url='https://github.com/zain/pyr', packages=['pyr'], install_requires=['pygments'], scripts=['bin/pyr']," ]
[ "* x**ix * y**iy i += 1 for ix in range(order2 + 1):", "fitorder) res = optimize.minimize(_corrval, coef_lin, args=(x, y, filtered, polyorder, trimfrac, coef_opt), method='Powell', options={'xtol':", "mode='same') unfiltered /= signal.convolve2d(inImage.ivar, gaussian, mode='same') + 1e-10 filtered = ndimage.interpolation.spline_filter(unfiltered) ############################################################# #", "infile + \" does not appear to contain a CHARIS wavelength solution in", "coef_opt = res.x if not res.success: log.info(\"Optimizing PSFlet location transformation coefficients may have", "* (order + 2): pass # raise ValueError(\"Number of coefficients incorrect for polynomial", "boolean Returns ------- score: float Negative sum of PSFlet fluxes, to be minimized", "Polynomial coefficients of wavelength solution order: int Order of polynomial wavelength solution Notes", "= int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12) shortorder = int(np.sqrt(len(coefshort) + 0.25)", "this is the name of the file infiledir: String If load is True,", "nlam_max] == 0): break self.xindx = x[:, :, :nlam_max] self.yindx = y[:, :,", "j += 1 i += 1 for ix in range(coeforder + 1): for", "+ 2)) for k in range(1, interporder + 1): coef += k *", "PSFlet order: int order of the polynomial fit trimfrac: float fraction of outliers", "_dy = _transform(xindx, yindx, coeforder, coef) dx += [_dx] dy += [_dy] R", "// 8 _s = x.shape[0] * 3 // 8 subfiltered = ndimage.interpolation.spline_filter(unfiltered[subshape:-subshape, subshape:-subshape])", "true value for the PSF-let spots. Default 0.7. coef: list initial guess of", "1): if ix + iy <= order1: continue _x += coef[i] * x**ix", "+ 0.25) - 1.5 + 1e-12) _x = np.zeros(np.asarray(x).shape) _y = np.zeros(np.asarray(y).shape) i", "not exist.\") outfile = re.sub('//', '/', outdir + '/PSFloc.fits') out = fits.HDUList(fits.PrimaryHDU(self.xindx)) out.append(fits.PrimaryHDU(self.yindx))", "self.nlam_max = None self.interp_arr = None self.order = None if load: self.loadpixsol(infile, infiledir)", "None self.yindx = None self.lam_indx = None self.nlam = None self.nlam_max = None", "pix_x, k=1, s=0) nlam[ix, iy] = y2 - y1 + 1 y[ix, iy,", "A loop then does a quick check to find reasonable offsets in x", "= coefshort[j] j += 1 i += 1 return coef def _transform(x, y,", "xindx, yindx = np.meshgrid(xindx, xindx) n_spline = 100 interp_x = np.zeros(tuple([n_spline] + list(xindx.shape)))", "value for the PSF-let spots. Default 0.7. coef: list initial guess of the", "order=3, lam1=None, lam2=None): ''' Returns the spectral resolution and interpolated wavelength array Parameters", "i = 0 for ix in range(coeforder + 1): for iy in range(coeforder", "k in range(self.order + 1): coef += self.interp_arr[k] * np.log(lam)**k return coef def", "in range(self.order + 1): coef += self.interp_arr[k] * np.log(lam)**k interp_x, interp_y = _transform(xindx,", "# discard these from the calculation before trimming. ################################################################# _x, _y = _transform(x,", "location of a given lenslet for a given polynomial fit Parameters ---------- coef:", "of length (order+1)*(order+2) to be optimized. 
Notes ----- The list of coefficients has", "_x += coef[i] * x**ix * y**iy i += 1 for ix in", "xarr[:, i] = np.log(lam)**i for i in range(self.interp_arr.shape[1]): coef = np.linalg.lstsq(xarr, allcoef[:, i])[0]", ":nlam[ix, iy]], tck_y) x[ix, iy, :nlam[ix, iy]] = interpolate.splev(lam_out[ix, iy, :nlam[ix, iy]], tck_x)", "in range(coeforder - ix + 1): if ix + iy <= shortorder: coef[i]", "coefficients for frame \" + inImage.filename) res = optimize.minimize(_corrval, coef_opt, args=( x[_s:-_s, _s:-_s],", "solution # to 4% below and 3% above limits of the coefficient file", "except: raise ValueError(\"Polynomial order must be integer\") # n**2 + 3*n + 2", "for estimating the grid of centroids. Should be close to the true value", "nlam[ix, iy] = y2 - y1 + 1 y[ix, iy, :nlam[ix, iy]] =", "with the PSFLets on the detector. Does most of the heavy lifting during", "Does most of the heavy lifting during the wavelength calibration step. \"\"\" def", "Whether to load an already-existing wavelength calibration file infile: String If load is", "is None: infile = re.sub('//', '/', infiledir + '/PSFloc.fits') hdulist = fits.open(infile) try:", "for a polynomial fit of the input order (i.e., for order 3, up", "+ iy <= order1: continue _x += coef[i] * x**ix * y**iy i", "of outliers). Analogous to the trimmed mean. Parameters ---------- coef: list of floats", "exist.\") outfile = re.sub('//', '/', outdir + '/PSFloc.fits') out = fits.HDUList(fits.PrimaryHDU(self.xindx)) out.append(fits.PrimaryHDU(self.yindx)) out.append(fits.PrimaryHDU(self.lam_indx))", "i = 0 for ix in range(order2 + 1): for iy in range(order1", "Y coordinate on the detector ''' coeforder = int(np.sqrt(coef.shape[0])) - 1 interp_x, interp_y", "Returns ------- interp_lam: array Array of wavelengths R: float Effective spectral resolution '''", "for desired wavelength `lam` Parameters ---------- lam: float Wavelength in nm allcoef: list", "+ 1] = scale * np.sin(phi) coef[n / 2 + order + 1]", "Parameters ---------- coef: list of floats coefficients for polynomial transformation x: ndarray coordinates", "5) * (_y > 5) * (_y < ydim - 5) return [_x,", "Y coordinate on the detector ''' if len(allcoef.shape) == 1: coeforder = int(np.sqrt(allcoef.shape[0]))", "options={'xtol': 1e-6, 'ftol': 1e-6}) coef_lin = res.x coef_opt = _insertorder(coef_lin, coef_opt) coef_opt[0] +=", "1): for iy in range(order1 - ix + 1): _x += coef[i] *", "ndarray coordinates of lenslets filtered: ndarray image convolved with gaussian PSFlet order: int", "y is 2*subsize ############################################################# if coef is None: ix_arr = np.arange(0, 14, 0.5)", "[] for i in range(n_spline): coef = np.zeros((coeforder + 1) * (coeforder +", "re import numpy as np from astropy.io import fits from scipy import interpolate,", "highordercoef: Boolean Returns ------- _x: ndarray Transformed coordinates _y: ndarray Transformed coordinates \"\"\"", "= np.exp(-(x**2 + y**2) / (2 * sig**2)) if inImage.ivar is None: unfiltered", "if not supplied, are initially set to the known pitch angle and scale.", "2) coef = np.zeros((n)) coef[0] = x0 coef[1] = scale * np.cos(phi) coef[order", "+ 1): for iy in range(coeforder - ix + 1): if ix +", "wavelength solution lam1: float Shortest wavelength in nm lam2: float Longest wavelength in", "must be integer\") else: if order < 1 or order > 5: raise", "+ 1] = scale * np.cos(phi) return list(coef) def _pullorder(coef, order=1): coeforder =", "allcoef, xindx, yindx, order=3, lam1=None, lam2=None): ''' Returns the spectral resolution 
and interpolated", "the name of the file infiledir: String If load is True, this is", "Boolean: do the lenslet PSFlets lie within the detector? ############################################################# good = (_x", "fluxes (disregarding those with the most and the least flux to limit the", "calibration file Parameters ---------- infile: String Name of the file infiledir: String Directory", "else: log.info(\"Performing initial optimization of PSFlet location transformation coefficients for frame \" +", "even a little less). Important note: as of now (09/2015), the number of", "= np.zeros(interp_x.shape) interp_lam = np.linspace(lam1, lam2, n_spline) for i in range(n_spline): coef =", "np.amax(lam) * 1.03 interporder = order if self.interp_arr is None: self.geninterparray(lam, allcoef, order=order)", "---------- lam: float Wavelength in nm allcoef: list of floats Polynomial coefficients of", "coef[(polyorder + 1) * (polyorder + 2) / 2] += iy - subshape", "iy, :nlam[ix, iy]], tck_x) for nlam_max in range(x.shape[-1]): if np.all(y[:, :, nlam_max] ==", "its inverse. Parameters ---------- lam: float Wavelength in nm allcoef: list of lists", "return_res(self, lam, allcoef, xindx, yindx, order=3, lam1=None, lam2=None): ''' Returns the spectral resolution", "* inImage.ivar, gaussian, mode='same') unfiltered /= signal.convolve2d(inImage.ivar, gaussian, mode='same') + 1e-10 filtered =", "solution to directory \" + outdir + \". Directory does not exist.\") outfile", "self.yindx = y[:, :, :nlam_max] self.nlam = nlam self.lam_indx = lam_out[:, :, :nlam_max]", "an Image class, assumed to be a monochromatic grid of spots with read", "= order xarr = np.ones((lam.shape[0], order + 1)) for i in range(1, order", "outliers). Analogous to the trimmed mean. Parameters ---------- coef: list of floats coefficients", "allcoef: list of floats List describing the polynomial coefficients that best fit the", "iy <= order: coef_short += [coef[i]] i += 1 for ix in range(coeforder", "Assumed to be a monochromatic grid of spots polyorder: float order of the", "np.arange(-100, 101) xindx, yindx = np.meshgrid(xindx, xindx) n_spline = 100 interp_x = np.zeros(tuple([n_spline]", "the array of xindx, yindx, nlam, lam_indx and nlam_max \"\"\" ################################################################### # Read", "return interp_x, interp_y if self.interp_arr is None: self.geninterparray(lam, allcoef, order=order) coeforder = int(np.sqrt(allcoef.shape[1]))", "interp_y = _transform(xindx, yindx, coeforder, allcoef) return interp_x, interp_y if self.interp_arr is None:", "& low combined) to trim in the minimization. Default 0.1 (5% trimmed on", "microspectrum Parameters ---------- lam: float Wavelength in nm allcoef: list of floats List", "import Image log = logging.getLogger('main') class PSFLets: \"\"\" Helper class to deal with", "optimize, signal try: from charis.image import Image except: from image import Image log", "position of all lenslets 3. a 2D ndarray with the number of valid", "order must be >0, <=5\") except: raise ValueError(\"Polynomial order must be integer\") #", "designed to constrain the domain of the PSF-let fitting later in the pipeline.", "location transformation coefficients for frame \" + inImage.filename) res = optimize.minimize(_corrval, coef_opt, args=(", "os import re import numpy as np from astropy.io import fits from scipy", "of centroids. Should be close to the true value for the PSF-let spots.", "signal-to-noise ratios of order unity (or even a little less). 
Important note: as", "fitorder is not None: coef_lin = _pullorder(coef_opt, fitorder) res = optimize.minimize(_corrval, coef_lin, args=(x,", "> 2048): continue if pix_y[-1] < pix_y[0]: try: tck_y = interpolate.splrep(pix_y[::-1], interp_lam[::-1], k=1,", "res.x coef_opt = _insertorder(coef_lin, coef_opt) coef_opt[0] += subshape coef_opt[(polyorder + 1) * (polyorder", "+ xdim / 2. - subshape, y0=iy + ydim / 2. - subshape,", "order = (order+1)*(order+2) highordercoef: Boolean Returns ------- _x: ndarray Transformed coordinates _y: ndarray", "to apply to the central pixel. Default 0 Returns ------- coef: list of", "= _transform(x, y, polyorder, coef_opt) ############################################################# # Boolean: do the lenslet PSFlets lie", "detector array. # Then optimize the coefficients. # We want to start with", "if lam1 is None: lam1 = np.amin(lam) / 1.04 if lam2 is None:", "mean. Parameters ---------- coef: list of floats coefficients for polynomial transformation x: ndarray", "def locatePSFlets(inImage, polyorder=2, sig=0.7, coef=None, trimfrac=0.1, phi=np.arctan2(1.926, -1), scale=15.02, fitorder=None): \"\"\" function locatePSFlets", "by # default. ################################################################### if lam1 is None: lam1 = np.amin(lam) / 1.04", "method='Powell') coef_opt = res.x else: log.info(\"Performing initial optimization of PSFlet location transformation coefficients", "the wavelength solution, and ultimately to compute its inverse. Parameters ---------- lam: float", "Parameters ---------- lam: float Wavelength in nm allcoef: list of lists floats Polynomial", "and returns the esimated positions of the spot centroids. This is designed to", "higher-order coefficients. This routine seems to be relatively robust down to per-lenslet signal-to-noise", "continue _y += coef[i] * x**ix * y**iy i += 1 return [_x,", "lam_out[:, :, :nlam_max] self.nlam_max = np.amax(nlam) def _initcoef(order, scale=15.02, phi=np.arctan2(1.926, -1), x0=0, y0=0):", "is None: lam1 = np.amin(lam) / 1.04 if lam2 is None: lam2 =", "x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac, coef_opt), method='Powell', options={'xtol': 1e-6, 'ftol': 1e-6})", "hdulist[2].data self.nlam = hdulist[3].data.astype(int) except: raise RuntimeError(\"File \" + infile + \" does", "spots, coefficients of wavelength # solution. Obtain extrapolated limits of wavlength solution #", "scale=15.02, fitorder=None): \"\"\" function locatePSFlets takes an Image class, assumed to be a", "----- the coefficients, if not supplied, are initially set to the known pitch", "of outliers (high & low combined) to trim Default 0.1 (5% trimmed on", "to SNR/PSFlet ~ 1 # Create slice indices for subimages to perform the", "list of floats List describing the polynomial coefficients that best fit the lenslets,", "+ 1): if ix + iy <= shortorder: coef[i] = coefshort[j] j +=", "= np.amin(lam) / 1.04 if lam2 is None: lam2 = np.amax(lam) * 1.03", "that are slightly to the right to get a good initial guess. #############################################################", "iy]] = np.arange(y1, y2 + 1) lam_out[ix, iy, :nlam[ix, iy]] = interpolate.splev(y[ix, iy,", "limits of wavlength solution # to 4% below and 3% above limits of", "CHARIS FOV, # discard these from the calculation before trimming. ################################################################# _x, _y", "\". 
Directory does not exist.\") outfile = re.sub('//', '/', outdir + '/PSFloc.fits') out", "order: int order of the polynomial fit trimfrac: float fraction of outliers (high", "interp_y if self.interp_arr is None: self.geninterparray(lam, allcoef, order=order) coeforder = int(np.sqrt(allcoef.shape[1])) - 1", "1e-5}) coef_lin = res.x coef_opt = _insertorder(coef_lin, coef_opt) else: res = optimize.minimize(_corrval, coef_opt,", "# are now at a slightly higher wavelength, so try out offsets #", "float Wavelength in nm allcoef: list of lists floats Polynomial coefficients of wavelength", "+ '/PSFloc.fits') hdulist = fits.open(infile) try: self.xindx = hdulist[0].data self.yindx = hdulist[1].data self.lam_indx", "the wavelength calibration step. \"\"\" def __init__(self, load=False, infile=None, infiledir='.'): ''' Initialize the", "res = optimize.minimize(_corrval, coef_opt, args=(x, y, filtered, polyorder, trimfrac), method='Powell', options={'xtol': 1e-5, 'ftol':", "- y1 + 1 y[ix, iy, :nlam[ix, iy]] = np.arange(y1, y2 + 1)", "if ix + iy <= order: coef_short += [coef[i]] i += 1 for", "[_x, _y] def _corrval(coef, x, y, filtered, order, trimfrac=0.1, highordercoef=None): \"\"\" Private function", "################################################################# _x, _y = _transform(x, y, order, coef, highordercoef) vals = ndimage.map_coordinates(filtered, [_y,", "polynomial order.\") coef = np.zeros((coeforder + 1) * (coeforder + 2)) for k", "y, order, coef, highordercoef) vals = ndimage.map_coordinates(filtered, [_y, _x], mode='constant', cval=np.nan, prefilter=False) vals_ok", "Gaussian used for estimating the grid of centroids. Should be close to the", "is None: self.geninterparray(lam, allcoef, order=order) coeforder = int(np.sqrt(allcoef.shape[1])) - 1 if not (coeforder", "FITS file, each extension corresponding to: 0. the list of wavelengths at which", "detector for the fiducial lenslet spacing. \"\"\" ############################################################# # Convolve with a Gaussian,", "\"\"\" Private function _initcoef in locate_psflets Create a set of coefficients including a", "locate_psflets Return the negative of the sum of the middle XX% of the", "np from astropy.io import fits from scipy import interpolate, ndimage, optimize, signal try:", "for lenslet coordinates outside the CHARIS FOV, # discard these from the calculation", "solution xindx: int X index of lenslet in lenslet array yindx: int Y", "_x, _y = _transform(x, y, polyorder, coef_opt) ############################################################# # Boolean: do the lenslet", "pass # raise ValueError(\"Number of coefficients incorrect for polynomial order.\") except: raise AttributeError(\"order", "not supplied, are initially set to the known pitch angle and scale. A", "outdir=\"calibrations/\"): ''' Saves wavelength calibration file Parameters ---------- outdir: String Directory in which", "coef def return_locations_short(self, coef, xindx, yindx): ''' Returns the x,y detector location of", "def __init__(self, load=False, infile=None, infiledir='.'): ''' Initialize the class Parameters ---------- load: Boolean", "for iy in range(order1 - ix + 1): _x += coef[i] * x**ix", "trimfrac) if newval < bestval: bestval = newval coef_opt = copy.deepcopy(coef) if init:", "j += 1 i += 1 return coef def _transform(x, y, order, coef,", "allcoef, xindx, yindx, order=3): ''' Calculates the detector coordinates of lenslet located at", "y. 
good:2D boolean ndarray True for lenslets with spots inside the detector footprint", "x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac), method='Powell') coef_opt = res.x else: log.info(\"Performing", "on the detector. Does most of the heavy lifting during the wavelength calibration", "self.interp_arr[:, i] = coef def return_locations_short(self, coef, xindx, yindx): ''' Returns the x,y", "the coefficients of the wavelength solution. These will be used to smooth/interpolate the", "in lenslet array order: int Order of polynomial wavelength solution Returns ------- interp_x:", "- ix + 1): if ix + iy <= shortorder: coef[i] = coefshort[j]", "locatePSFlets takes an Image class, assumed to be a monochromatic grid of spots", "the middle XX% of the PSFlet spot fluxes (disregarding those with the most", "of spots polyorder: float order of the polynomial coordinate transformation. Default 2. sig:", "raise def geninterparray(self, lam, allcoef, order=3): ''' Set up array to solve for", "_insertorder(coef_lin, coef_opt) coef_opt[0] += subshape coef_opt[(polyorder + 1) * (polyorder + 2) /", "+ 1e-12) shortorder = int(np.sqrt(len(coefshort) + 0.25) - 1.5 + 1e-12) i =", "coef_lin = _pullorder(coef_opt, fitorder) res = optimize.minimize(_corrval, coef_lin, args=(x, y, filtered, polyorder, trimfrac,", "s=0) nlam[ix, iy] = y2 - y1 + 1 y[ix, iy, :nlam[ix, iy]]", "Default 0.7. coef: list initial guess of the coefficients of polynomial coordinate transformation", "y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac, coef_opt), method='Powell', options={'xtol': 1e-6, 'ftol': 1e-6}) coef_lin =", "out.append(fits.PrimaryHDU(self.lam_indx)) out.append(fits.PrimaryHDU(self.nlam.astype(int))) try: out.writeto(outfile, overwrite=True) except: raise def geninterparray(self, lam, allcoef, order=3): '''", "interp_lam, R def monochrome_coef(self, lam, alllam=None, allcoef=None, order=3): if self.interp_arr is None: if", "coefficients from last time, we assume that we # are now at a", "Returns ------- score: float Negative sum of PSFlet fluxes, to be minimized \"\"\"", "in nm Returns ------- interp_lam: array Array of wavelengths R: float Effective spectral", "3% above limits of the coefficient file by # default. ################################################################### if lam1", "the polynomical fit lam1: float Lowest wavelength in nm lam2: float Highest wavelength", "True else: ix_arr = np.arange(-3.0, 3.05, 0.2) iy_arr = np.arange(-3.0, 3.05, 0.2) coef_save", "order 3, up to terms like x**3 and x**2*y, but not x**3*y). It", "1.5 + 1e-12) i = 0 j = 0 for ix in range(coeforder", "with the Y position of all lenslets 3. a 2D ndarray with the", "True for lenslets with spots inside the detector footprint coef: list of floats", "Private function _corrval in locate_psflets Return the negative of the sum of the", "8 subfiltered = ndimage.interpolation.spline_filter(unfiltered[subshape:-subshape, subshape:-subshape]) for ix in ix_arr: for iy in iy_arr:", "interp_y def genpixsol(self, lam, allcoef, order=3, lam1=None, lam2=None): \"\"\" Calculates the wavelength at", "coefshort[j] j += 1 i += 1 for ix in range(coeforder + 1):", "The pitch angle of the lenslets. 
Default atan(1.926) x0: float x offset to", "interpolate.splrep(interp_lam, pix_x, k=1, s=0) nlam[ix, iy] = y2 - y1 + 1 y[ix,", "of coefficients incorrect for polynomial order.\") except: raise AttributeError(\"order must be integer, coef", "fit of the input order (i.e., for order 3, up to terms like", "Create a set of coefficients including a rotation matrix plus zeros. Parameters ----------", "== int(order): raise ValueError(\"Polynomial order must be integer\") else: if order < 1", "y: ndarray of floats Rectilinear grid order: int Order of the polynomial fit", "+ 2)) for k in range(self.order + 1): coef += self.interp_arr[k] * np.log(lam)**k", "for frame \" + inImage.filename) coef_lin = _pullorder(coef_opt, 1) res = optimize.minimize(_corrval, coef_lin,", "for iy in range(xindx.shape[1]): pix_x = interp_x[:, ix, iy] pix_y = interp_y[:, ix,", "list of lists floats Polynomial coefficients of wavelength solution xindx: int X index", "3. a 2D ndarray with the number of valid wavelengths for a given", "(high & low combined) to trim in the minimization. Default 0.1 (5% trimmed", "image. ############################################################# x = np.arange(-1 * int(3 * sig + 1), int(3 *", "transformation coefficients for frame \" + inImage.filename) coef_lin = _pullorder(coef_opt, 1) res =", "coeforder = int(np.sqrt(allcoef.shape[1])) - 1 if not (coeforder + 1) * (coeforder +", "2. - subshape, y0=iy + ydim / 2. - subshape, scale=scale, phi=phi) else:", "the file resides ''' if infile is None: infile = re.sub('//', '/', infiledir", "the pipeline. Parameters ---------- imImage: Image class Assumed to be a monochromatic grid", "This routine seems to be relatively robust down to per-lenslet signal-to-noise ratios of", "res.x coef_opt = _insertorder(coef_lin, coef_opt) else: res = optimize.minimize(_corrval, coef_opt, args=(x, y, filtered,", "trimmed on the high end, 5% on the low end) highordercoef: boolean Returns", "= optimize.minimize(_corrval, coef_opt, args=( x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac), method='Powell') coef_opt", "given polynomial fit Parameters ---------- coef: lists floats Polynomial coefficients of fit for", "_corrval in locate_psflets Return the negative of the sum of the middle XX%", "of PSFlet location transformation coefficients for frame \" + inImage.filename) if not init", "x0: float x offset to apply to the central pixel. Default 0 y0:", "and x**2*y, but not x**3*y). It is all zeros in the output apart", "subimages to perform the intial # fits on. The new dimensionality in both", "= np.zeros((order + 1, allcoef.shape[1])) self.order = order xarr = np.ones((lam.shape[0], order +", "order must be integer\") else: if order < 1 or order > 5:", "class Assumed to be a monochromatic grid of spots polyorder: float order of", "be a list.\") try: if not order == int(order): raise ValueError(\"Polynomial order must", "coef[i] * x**ix * y**iy i += 1 if highordercoef is None: return", "# fits on. 
The new dimensionality in both x and y is 2*subsize", "------- _x: ndarray Transformed coordinates _y: ndarray Transformed coordinates \"\"\" try: if not", "else: coef = copy.deepcopy(coef_save) coef[0] += ix - subshape coef[(polyorder + 1) *", "polyorder, trimfrac), method='Powell', options={'xtol': 1e-5, 'ftol': 1e-5}) coef_opt = res.x if not res.success:", "X index of lenslet in lenslet array yindx: int Y index of lenslet", "k in range(self.order + 1): coef += self.interp_arr[k] * np.log(lam)**k interp_x, interp_y =", "lists floats Polynomial coefficients of fit for a single wavelength xindx: int X", "solution order: int Order of polynomial wavelength solution Notes ----- Populates the attribute", "res = optimize.minimize(_corrval, coef_opt, args=( x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac), method='Powell')", "+= 1 for ix in range(order2 + 1): for iy in range(order1 -", "lenslet for a given polynomial fit Parameters ---------- coef: lists floats Polynomial coefficients", "wavelength solution. These will be used to smooth/interpolate the wavelength solution, and ultimately", "re.sub('//', '/', infiledir + '/PSFloc.fits') hdulist = fits.open(infile) try: self.xindx = hdulist[0].data self.yindx", "is a multi-extension FITS file, each extension corresponding to: 0. the list of", "= 0 j = 0 for ix in range(coeforder + 1): for iy", "if not os.path.isdir(outdir): raise IOError(\"Attempting to save pixel solution to directory \" +", "int Y index of lenslet in lenslet array order: int Order of polynomial", "lists floats Polynomial coefficients of wavelength solution order: int Order of polynomial wavelength", "default. ################################################################### if lam1 is None: lam1 = np.amin(lam) / 1.04 if lam2", "import glob import logging import os import re import numpy as np from", "length (order+1)*(order+2) to be optimized. Notes ----- The list of coefficients has space", "central pixel. Default 0 Returns ------- coef: list of floats A list of", "------- interp_x: float X coordinate on the detector interp_y: float Y coordinate on", "* np.log(interp_lam[i])**(k - 1) _dx, _dy = _transform(xindx, yindx, coeforder, coef) dx +=", "trimfrac / 2) vals_sorted = np.sort(vals_ok) score = -1 * np.sum(vals_sorted[iclip:-iclip]) return score", "a multi-extension FITS file, each extension corresponding to: 0. the list of wavelengths", "to contain a CHARIS wavelength solution in the appropriate format.\") self.nlam_max = np.amax(self.nlam)", "-scale * np.sin(phi) coef[n / 2] = y0 coef[n / 2 + 1]", "infile = re.sub('//', '/', infiledir + '/PSFloc.fits') hdulist = fits.open(infile) try: self.xindx =", "- subshape coef[(polyorder + 1) * (polyorder + 2) / 2] += iy", "= [] dx = [] for i in range(n_spline): coef = np.zeros((coeforder +", "infiledir='.'): ''' Initialize the class Parameters ---------- load: Boolean Whether to load an", "x0 coef[1] = scale * np.cos(phi) coef[order + 1] = -scale * np.sin(phi)", "ValueError(\"Polynomial order must be >0, <=5\") except: raise ValueError(\"Polynomial order must be integer\")", "trimfrac: float fraction of lenslet outliers (high & low combined) to trim in", "False bestval = 0 subshape = xdim * 3 // 8 _s =", "solution, and ultimately to compute its inverse. 
Parameters ---------- lam: float Wavelength in", "\" + inImage.filename) _x, _y = _transform(x, y, polyorder, coef_opt) ############################################################# # Boolean:", "1.03 interporder = order if self.interp_arr is None: self.geninterparray(lam, allcoef, order=order) coeforder =", "'/PSFloc.fits') hdulist = fits.open(infile) try: self.xindx = hdulist[0].data self.yindx = hdulist[1].data self.lam_indx =", "floats List describing the polynomial coefficients that best fit the lenslets, for all", "coefficients of fit for a single wavelength xindx: int X index of lenslet", "################################################################### if lam1 is None: lam1 = np.amin(lam) / 1.04 if lam2 is", "coefficients of wavelength solution order: int Order of polynomial wavelength solution Notes -----", "indices for subimages to perform the intial # fits on. The new dimensionality", "- 1.5 + 1e-12) i = 0 for ix in range(order2 + 1):", "hard-coded as 1/10 the dimensionality of the final array. This is sufficient to", "to deal with the PSFLets on the detector. Does most of the heavy", "except: raise RuntimeError(\"File \" + infile + \" does not appear to contain", "= ndimage.map_coordinates(filtered, [_y, _x], mode='constant', cval=np.nan, prefilter=False) vals_ok = vals[np.where(np.isfinite(vals))] iclip = int(vals_ok.shape[0]", "floats Polynomial coefficients of fit for a single wavelength xindx: int X index", "allcoef.shape[1]: raise ValueError(\"Number of coefficients incorrect for polynomial order.\") xindx = np.arange(-100, 101)", "Read in wavelengths of spots, coefficients of wavelength # solution. Obtain extrapolated limits", "must be integer, coef should be a list.\") try: if not order ==", "the low end) Returns ------- x: 2D ndarray Estimated spot centroids in x.", "allcoef: list of lists floats Polynomial coefficients of wavelength solution xindx: int X", "\"\"\" Calculates the wavelength at the center of each pixel within a microspectrum", "= int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12) _x = np.zeros(np.asarray(x).shape) _y =", "allcoef, order=order) coeforder = int(np.sqrt(allcoef.shape[1])) - 1 if not (coeforder + 1) *", "Create slice indices for subimages to perform the intial # fits on. The", "raise RuntimeError(\"File \" + infile + \" does not appear to contain a", "of spots, coefficients of wavelength # solution. Obtain extrapolated limits of wavlength solution", "= 20 ydim, xdim = inImage.data.shape x = np.arange(-(ydim // gridfrac), ydim //", "\"\"\" function locatePSFlets takes an Image class, assumed to be a monochromatic grid", "coef += self.interp_arr[k] * np.log(interp_lam[i])**k interp_x[i], interp_y[i] = _transform(xindx, yindx, coeforder, coef) x", "input order (i.e., for order 3, up to terms like x**3 and x**2*y,", "given lenslet (some wavelengths fall outside of the detector area) ''' if not", "outdir + \". Directory does not exist.\") outfile = re.sub('//', '/', outdir +", "i = 0 for ix in range(order1 + 1): for iy in range(order1", "+ 1e-12) i = 0 j = 0 for ix in range(coeforder +", "sig + 1) + 1) x, y = np.meshgrid(x, x) gaussian = np.exp(-(x**2", "extension corresponding to: 0. 
the list of wavelengths at which the calibration is", "end, 5% on the low end) Returns ------- x: 2D ndarray Estimated spot", "interp_x, interp_y def return_res(self, lam, allcoef, xindx, yindx, order=3, lam1=None, lam2=None): ''' Returns", "desired wavelength `lam` Parameters ---------- lam: float Wavelength in nm allcoef: list of", "Transformed coordinates _y: ndarray Transformed coordinates \"\"\" try: if not len(coef) == (order", "np.all(pix_x > 2048) or np.all(pix_y < 0) or np.all(pix_y > 2048): continue if", "None: self.geninterparray(lam, allcoef, order=order) coeforder = int(np.sqrt(allcoef.shape[1])) - 1 n_spline = 100 interp_lam", "with previous values\") init = False bestval = 0 subshape = xdim *", "match the length required by order = (order+1)*(order+2) highordercoef: Boolean Returns ------- _x:", "order + 1)) for i in range(1, order + 1): xarr[:, i] =", "= np.zeros(tuple(list(xindx.shape) + [1000])) y = np.zeros(x.shape) nlam = np.zeros(xindx.shape, np.int) lam_out =", "coordinate transformation trimfrac: float fraction of lenslet outliers (high & low combined) to", "that we # are now at a slightly higher wavelength, so try out", "higher wavelength, so try out offsets # that are slightly to the right", "+ 1] = -scale * np.sin(phi) coef[n / 2] = y0 coef[n /", "used for estimating the grid of centroids. Should be close to the true", "############################################################# log.info(\"Performing final optimization of PSFlet location transformation coefficients for frame \" +", "coefficient file by # default. ################################################################### if lam1 is None: lam1 = np.amin(lam)", "np.all(pix_y > 2048): continue if pix_y[-1] < pix_y[0]: try: tck_y = interpolate.splrep(pix_y[::-1], interp_lam[::-1],", "grid is hard-coded as 1/10 the dimensionality of the final array. This is", "yindx, coeforder, coef) return interp_x, interp_y def genpixsol(self, lam, allcoef, order=3, lam1=None, lam2=None):", "self.nlam_max = np.amax(nlam) def _initcoef(order, scale=15.02, phi=np.arctan2(1.926, -1), x0=0, y0=0): \"\"\" Private function", "+ 3*n + 2 = (n + 1.5)**2 - 0.25 # = (1/4)*((2*n", "+ 1): coef += self.interp_arr[k] * np.log(interp_lam[i])**k interp_x[i], interp_y[i] = _transform(xindx, yindx, coeforder,", "= res.x coef_opt = _insertorder(coef_lin, coef_opt) coef_opt[0] += subshape coef_opt[(polyorder + 1) *", "right to get a good initial guess. ############################################################# log.info(\"Performing final optimization of PSFlet", "Convolve with a Gaussian, centroid the filtered image. ############################################################# x = np.arange(-1 *", "+= ix - subshape coef[(polyorder + 1) * (polyorder + 2) / 2]", "load=False, infile=None, infiledir='.'): ''' Initialize the class Parameters ---------- load: Boolean Whether to", "list of floats Polynomial coefficients of wavelength solution xindx: int X index of", "pixel. Default 0 y0: float y offset to apply to the central pixel.", "of the middle XX% of the PSFlet spot fluxes (disregarding those with the", "this is the directory in which the file resides ''' self.xindx = None", "on. 
The new dimensionality in both x and y is 2*subsize ############################################################# if", "in lenslet array yindx: int Y index of lenslet in lenslet array Returns", "2 + order + 1] = scale * np.cos(phi) return list(coef) def _pullorder(coef,", "iy_arr = np.arange(0, 25, 0.5) log.info(\"Initializing PSFlet location transformation coefficients\") init = True", ":, :nlam_max] self.yindx = y[:, :, :nlam_max] self.nlam = nlam self.lam_indx = lam_out[:,", "PSFLet class ''' self.interp_arr = np.zeros((order + 1, allcoef.shape[1])) self.order = order xarr", "float y offset to apply to the central pixel. Default 0 Returns -------", "spot fluxes (disregarding those with the most and the least flux to limit", "list of length (order+1)*(order+2) to be optimized. Notes ----- The list of coefficients", "ValueError(\"Polynomial order must be integer\") # n**2 + 3*n + 2 = (n", "coef_opt = _insertorder(coef_lin, coef_opt) coef_opt[0] += subshape coef_opt[(polyorder + 1) * (polyorder +", "(order + 1) * (order + 2) coef = np.zeros((n)) coef[0] = x0", "''' self.xindx = None self.yindx = None self.lam_indx = None self.nlam = None", "- 1 if not (coeforder + 1) * (coeforder + 2) == allcoef.shape[1]:", "which the file resides ''' self.xindx = None self.yindx = None self.lam_indx =", "interp_x, interp_y def genpixsol(self, lam, allcoef, order=3, lam1=None, lam2=None): \"\"\" Calculates the wavelength", "allcoef=None, order=3): if self.interp_arr is None: if alllam is None or allcoef is", "raise IOError(\"Attempting to save pixel solution to directory \" + outdir + \".", "a good initial guess. ############################################################# log.info(\"Performing final optimization of PSFlet location transformation coefficients", "at `xindx`, `yindx` for desired wavelength `lam` Parameters ---------- lam: float Wavelength in", ">0, <=5\") except: raise ValueError(\"Polynomial order must be integer\") # n**2 + 3*n", "fits to the coefficients of the wavelength solution. These will be used to", "+ inImage.filename) coef_lin = _pullorder(coef_opt, 1) res = optimize.minimize(_corrval, coef_lin, args=( x[_s:-_s, _s:-_s],", "inImage.filename) coef_lin = _pullorder(coef_opt, 1) res = optimize.minimize(_corrval, coef_lin, args=( x[_s:-_s, _s:-_s], y[_s:-_s,", "coef = np.linalg.lstsq(xarr, allcoef[:, i])[0] self.interp_arr[:, i] = coef def return_locations_short(self, coef, xindx,", "optimizer refines these and the higher-order coefficients. This routine seems to be relatively", "+= iy - subshape newval = _corrval(coef, x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder,", "ValueError(\"Interpolation array has not been computed. Must call monochrome_coef with arrays.\") self.geninterparray(alllam, allcoef,", "= interpolate.splrep(pix_y, interp_lam, k=1, s=0) y1, y2 = [int(np.amin(pix_y)) + 1, int(np.amax(pix_y))] tck_x", "directory \" + outdir + \". Directory does not exist.\") outfile = re.sub('//',", "/ (2 * sig**2)) if inImage.ivar is None: unfiltered = signal.convolve2d(inImage.data, gaussian, mode='same')", "2D ndarray with the X position of all lenslets 2. a 2D ndarray", "-1 * np.sum(vals_sorted[iclip:-iclip]) return score def locatePSFlets(inImage, polyorder=2, sig=0.7, coef=None, trimfrac=0.1, phi=np.arctan2(1.926, -1),", "np.int) lam_out = np.zeros(y.shape) good = np.zeros(xindx.shape) for ix in range(xindx.shape[0]): for iy", "1 i += 1 return coef def _transform(x, y, order, coef, highordercoef=None): \"\"\"", "heavy lifting during the wavelength calibration step. 
\"\"\" def __init__(self, load=False, infile=None, infiledir='.'):", "if infile is None: infile = re.sub('//', '/', infiledir + '/PSFloc.fits') hdulist =", "if load: self.loadpixsol(infile, infiledir) def loadpixsol(self, infile=None, infiledir='./calibrations'): ''' Loads existing wavelength calibration", "X coordinate on the detector interp_y: float Y coordinate on the detector '''", "best fit the lenslets, for all wavelengths order: int Order of the polynomical", "self.nlam = hdulist[3].data.astype(int) except: raise RuntimeError(\"File \" + infile + \" does not", "Obtain extrapolated limits of wavlength solution # to 4% below and 3% above", "int Order of the polynomial fit coef: list of floats List of the", "int order of the polynomial fit trimfrac: float fraction of outliers (high &", "coef += self.interp_arr[k] * np.log(lam)**k return coef def return_locations(self, lam, allcoef, xindx, yindx,", "int(np.sqrt(coef.shape[0])) - 1 interp_x, interp_y = _transform(xindx, yindx, coeforder, coef) return interp_x, interp_y", "- ix + 1): if ix + iy <= order1: continue _y +=", "3*n + 2 = (n + 1.5)**2 - 0.25 # = (1/4)*((2*n +", "phi=np.arctan2(1.926, -1), scale=15.02, fitorder=None): \"\"\" function locatePSFlets takes an Image class, assumed to", "which to put the file. The file is name PSFloc.fits and is a", "= np.zeros(tuple([n_spline] + list(xindx.shape))) interp_y = np.zeros(interp_x.shape) interp_lam = np.linspace(lam1, lam2, n_spline) for", "x) gaussian = np.exp(-(x**2 + y**2) / (2 * sig**2)) if inImage.ivar is", "for i in range(n_spline): coef = np.zeros((coeforder + 1) * (coeforder + 2))", "float Negative sum of PSFlet fluxes, to be minimized \"\"\" ################################################################# # Use", "\" does not appear to contain a CHARIS wavelength solution in the appropriate", "if not init and fitorder is not None: coef_lin = _pullorder(coef_opt, fitorder) res", "list of floats List of best-fit polynomial coefficients Notes ----- the coefficients, if", "coordinate on the detector interp_y: float Y coordinate on the detector ''' if", "wavelength # solution. Obtain extrapolated limits of wavlength solution # to 4% below", "----- Populates the attribute interp_arr in PSFLet class ''' self.interp_arr = np.zeros((order +", "impact of outliers). Analogous to the trimmed mean. Parameters ---------- coef: list of", "= np.zeros(self.interp_arr[0].shape) for k in range(self.order + 1): coef += self.interp_arr[k] * np.log(lam)**k", "does not appear to contain a CHARIS wavelength solution in the appropriate format.\")", "lam_indx and nlam_max \"\"\" ################################################################### # Read in wavelengths of spots, coefficients of", "deal with the PSFLets on the detector. 
Does most of the heavy lifting", "Negative sum of PSFlet fluxes, to be minimized \"\"\" ################################################################# # Use np.nan", "of lists floats Polynomial coefficients of wavelength solution order: int Order of polynomial", "<= order: coef_short += [coef[i]] i += 1 return coef_short def _insertorder(coefshort, coef):", "Lowest wavelength in nm lam2: float Highest wavelength in nm Notes ----- This", "x and y is 2*subsize ############################################################# if coef is None: ix_arr = np.arange(0,", "= np.zeros((coeforder + 1) * (coeforder + 2)) for k in range(1, interporder", "of floats List describing the polynomial coefficients that best fit the lenslets, for", "most of the fields of the PSFLet class: the array of xindx, yindx,", "the impact of outliers). Analogous to the trimmed mean. Parameters ---------- coef: list", "None self.order = None if load: self.loadpixsol(infile, infiledir) def loadpixsol(self, infile=None, infiledir='./calibrations'): '''", "+ outdir + \". Directory does not exist.\") outfile = re.sub('//', '/', outdir", "a given lenslet for a given polynomial fit Parameters ---------- coef: lists floats", "= y0 coef[n / 2 + 1] = scale * np.sin(phi) coef[n /", "a grid of # offsets. Seems to be robust down to SNR/PSFlet ~", "np.log(interp_lam[i])**(k - 1) _dx, _dy = _transform(xindx, yindx, coeforder, coef) dx += [_dx]", "np.log(lam)**k interp_x, interp_y = _transform(xindx, yindx, coeforder, coef) return interp_x, interp_y def genpixsol(self,", "Use np.nan for lenslet coordinates outside the CHARIS FOV, # discard these from", "lenslet array order: int Order of polynomial wavelength solution lam1: float Shortest wavelength", "and the least flux to limit the impact of outliers). Analogous to the", "of the coefficient file by # default. ################################################################### if lam1 is None: lam1", "0 subshape = xdim * 3 // 8 _s = x.shape[0] * 3", "incorrect for polynomial order.\") coef = np.zeros((coeforder + 1) * (coeforder + 2))", "infiledir: String Directory in which the file resides ''' if infile is None:", "lenslet in lenslet array Returns ------- interp_x: float X coordinate on the detector", "of PSFlet location transformation coefficients for frame \" + inImage.filename) coef_lin = _pullorder(coef_opt,", "from the calculation before trimming. ################################################################# _x, _y = _transform(x, y, order, coef,", "little less). Important note: as of now (09/2015), the number of lenslets to", "or allcoef is None: raise ValueError(\"Interpolation array has not been computed. Must call", "order: int Order of the polynomical fit lam1: float Lowest wavelength in nm", "for iy in range(coeforder - ix + 1): if ix + iy <=", "transform the coordinates using a polynomial. 
Parameters ---------- x: ndarray Rectilinear grid y:", "+ np.asarray(dx)**2) return interp_lam, R def monochrome_coef(self, lam, alllam=None, allcoef=None, order=3): if self.interp_arr", "unfiltered = signal.convolve2d(inImage.data, gaussian, mode='same') else: unfiltered = signal.convolve2d(inImage.data * inImage.ivar, gaussian, mode='same')", "initial optimization of PSFlet location transformation coefficients for frame \" + inImage.filename) coef_lin", "_insertorder(coef_lin, coef_opt) else: res = optimize.minimize(_corrval, coef_opt, args=(x, y, filtered, polyorder, trimfrac), method='Powell',", "1)) for i in range(1, order + 1): xarr[:, i] = np.log(lam)**i for", "[_dx] dy += [_dy] R = np.sqrt(np.asarray(dy)**2 + np.asarray(dx)**2) return interp_lam, R def", "given to transform the coordinates using a polynomial. Parameters ---------- x: ndarray Rectilinear", "float Shortest wavelength in nm lam2: float Longest wavelength in nm Returns -------", ":nlam_max] self.yindx = y[:, :, :nlam_max] self.nlam = nlam self.lam_indx = lam_out[:, :,", "= hdulist[3].data.astype(int) except: raise RuntimeError(\"File \" + infile + \" does not appear", "coeforder, coef) return interp_x, interp_y def genpixsol(self, lam, allcoef, order=3, lam1=None, lam2=None): \"\"\"", "lenslets to grid is hard-coded as 1/10 the dimensionality of the final array.", "= interpolate.splev(y[ix, iy, :nlam[ix, iy]], tck_y) x[ix, iy, :nlam[ix, iy]] = interpolate.splev(lam_out[ix, iy,", "grid of spots with read noise and shot noise, and returns the esimated", "in range(self.interp_arr.shape[1]): coef = np.linalg.lstsq(xarr, allcoef[:, i])[0] self.interp_arr[:, i] = coef def return_locations_short(self,", "lam_out[ix, iy, :nlam[ix, iy]] = interpolate.splev(y[ix, iy, :nlam[ix, iy]], tck_y) x[ix, iy, :nlam[ix,", "order=order) coeforder = int(np.sqrt(allcoef.shape[1])) - 1 if not (coeforder + 1) * (coeforder", "iy]] = interpolate.splev(y[ix, iy, :nlam[ix, iy]], tck_y) x[ix, iy, :nlam[ix, iy]] = interpolate.splev(lam_out[ix,", "i])[0] self.interp_arr[:, i] = coef def return_locations_short(self, coef, xindx, yindx): ''' Returns the", "list initial guess of the coefficients of polynomial coordinate transformation trimfrac: float fraction", "int Order of polynomial wavelength solution lam1: float Shortest wavelength in nm lam2:", "be integer\") # n**2 + 3*n + 2 = (n + 1.5)**2 -", "(order + 1) * (order + 2): pass # raise ValueError(\"Number of coefficients", "[coef[i]] i += 1 for ix in range(coeforder + 1): for iy in", "---------- order: int The polynomial order of the grid distortion scale: float The", "ix + iy <= shortorder: coef[i] = coefshort[j] j += 1 i +=", "coordinate on the detector interp_y: float Y coordinate on the detector ''' coeforder", "= scale * np.sin(phi) coef[n / 2 + order + 1] = scale", "of floats Rectilinear grid order: int Order of the polynomial fit coef: list", "coef=None, trimfrac=0.1, phi=np.arctan2(1.926, -1), scale=15.02, fitorder=None): \"\"\" function locatePSFlets takes an Image class,", "np.zeros(x.shape) nlam = np.zeros(xindx.shape, np.int) lam_out = np.zeros(y.shape) good = np.zeros(xindx.shape) for ix", "we use a grid of # offsets. 
Seems to be robust down to", "_pullorder(coef_opt, fitorder) res = optimize.minimize(_corrval, coef_lin, args=(x, y, filtered, polyorder, trimfrac, coef_opt), method='Powell',", "4% below and 3% above limits of the coefficient file by # default.", "with read noise and shot noise, and returns the esimated positions of the", "= copy.deepcopy(coef) if init: log.info(\"Performing initial optimization of PSFlet location transformation coefficients for", "= np.linspace(lam1, lam2, n_spline) dy = [] dx = [] for i in", "grid of # offsets. Seems to be robust down to SNR/PSFlet ~ 1", "compute its inverse. Parameters ---------- lam: float Wavelength in nm allcoef: list of", "are now at a slightly higher wavelength, so try out offsets # that", "> 5) * (_y < ydim - 5) return [_x, _y, good, coef_opt]", "Notes ----- Populates the attribute interp_arr in PSFLet class ''' self.interp_arr = np.zeros((order", "1): xarr[:, i] = np.log(lam)**i for i in range(self.interp_arr.shape[1]): coef = np.linalg.lstsq(xarr, allcoef[:,", "and 3% above limits of the coefficient file by # default. ################################################################### if", "is not None: coef_lin = _pullorder(coef_opt, fitorder) res = optimize.minimize(_corrval, coef_lin, args=(x, y,", "detector interp_y: float Y coordinate on the detector ''' if len(allcoef.shape) == 1:", "+ 1, int(np.amax(pix_y))] tck_x = interpolate.splrep(interp_lam, pix_x, k=1, s=0) nlam[ix, iy] = y2", "[] dx = [] for i in range(n_spline): coef = np.zeros((coeforder + 1)", "have failed for frame \" + inImage.filename) _x, _y = _transform(x, y, polyorder,", "if self.interp_arr is None: if alllam is None or allcoef is None: raise", "= None self.lam_indx = None self.nlam = None self.nlam_max = None self.interp_arr =", "outliers (high & low combined) to trim Default 0.1 (5% trimmed on the", "np.zeros(np.asarray(y).shape) i = 0 for ix in range(order1 + 1): for iy in", "3)**2 - 1) = len(coef) order1 = int(np.sqrt(len(coef) + 0.25) - 1.5 +", "if len(allcoef.shape) == 1: coeforder = int(np.sqrt(allcoef.shape[0])) - 1 interp_x, interp_y = _transform(xindx,", "pix_x = interp_x[:, ix, iy] pix_y = interp_y[:, ix, iy] if np.all(pix_x <", "coeforder = int(np.sqrt(coef.shape[0])) - 1 interp_x, interp_y = _transform(xindx, yindx, coeforder, coef) return", "wavelength, so try out offsets # that are slightly to the right to", "array Array of wavelengths R: float Effective spectral resolution ''' if lam1 is", "float The pitch angle of the lenslets. Default atan(1.926) x0: float x offset", "2. a 2D ndarray with the Y position of all lenslets 3. 
import copy
import glob
import logging
import os
import re

import numpy as np
from astropy.io import fits
from scipy import interpolate, ndimage, optimize, signal

try:
    from charis.image import Image
except:
    from image import Image

log = logging.getLogger('main')


class PSFLets:
    """
    Helper class to deal with the PSFlet locations on the detector.
    Does most of the heavy lifting during the wavelength calibration
    step.
    """

    def __init__(self, load=False, infile=None, infiledir='.'):
        '''
        Initialize the class

        Parameters
        ----------
        load: Boolean
            Whether to load an already-existing wavelength calibration file
        infile: String
            If load is True, this is the name of the file
        infiledir: String
            If load is True, this is the directory in which the file resides
        '''
        self.xindx = None
        self.yindx = None
        self.lam_indx = None
        self.nlam = None
        self.nlam_max = None
        self.interp_arr = None
        self.order = None

        if load:
            self.loadpixsol(infile, infiledir)

    def loadpixsol(self, infile=None, infiledir='./calibrations'):
        '''
        Loads an existing wavelength calibration file

        Parameters
        ----------
        infile: String
            Name of the file
        infiledir: String
            Directory in which the file resides
        '''
        if infile is None:
            infile = re.sub('//', '/', infiledir + '/PSFloc.fits')
        hdulist = fits.open(infile)

        try:
            self.xindx = hdulist[0].data
            self.yindx = hdulist[1].data
            self.lam_indx = hdulist[2].data
            self.nlam = hdulist[3].data.astype(int)
        except:
            raise RuntimeError("File " + infile + " does not appear to contain a "
                               "CHARIS wavelength solution in the appropriate format.")
        self.nlam_max = np.amax(self.nlam)

    def savepixsol(self, outdir="calibrations/"):
        '''
        Saves the wavelength calibration file

        Parameters
        ----------
        outdir: String
            Directory in which to put the file.  The file is named
            PSFloc.fits and is a multi-extension FITS file, each
            extension corresponding to:
            0. the list of wavelengths at which the calibration is done
            1. a 2D ndarray with the X position of all lenslets
            2. a 2D ndarray with the Y position of all lenslets
            3. a 2D ndarray with the number of valid wavelengths for a
               given lenslet (some wavelengths fall outside of the
               detector area)
        '''
        if not os.path.isdir(outdir):
            raise IOError("Attempting to save pixel solution to directory "
                          + outdir + ".  Directory does not exist.")
        outfile = re.sub('//', '/', outdir + '/PSFloc.fits')
        out = fits.HDUList(fits.PrimaryHDU(self.xindx))
        out.append(fits.PrimaryHDU(self.yindx))
        out.append(fits.PrimaryHDU(self.lam_indx))
        out.append(fits.PrimaryHDU(self.nlam.astype(int)))
        try:
            out.writeto(outfile, overwrite=True)
        except:
            raise
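    # -----------------------------------------------------------------
    # Illustrative sketch (not part of the pipeline): round-tripping a
    # calibration through savepixsol/loadpixsol.  The directory name
    # and the pre-populated instance `pl` are assumptions for
    # exposition.
    #
    #     pl.savepixsol(outdir="calibrations/")
    #     pl2 = PSFLets(load=True, infiledir="calibrations")
    #     assert np.allclose(pl.xindx, pl2.xindx)
    # -----------------------------------------------------------------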
    def geninterparray(self, lam, allcoef, order=3):
        '''
        Set up the array of best-fit polynomial fits to the
        coefficients of the wavelength solution.  These will be used
        to smooth/interpolate the wavelength solution, and ultimately
        to compute its inverse.

        Parameters
        ----------
        lam: float
            Wavelength in nm
        allcoef: list of lists floats
            Polynomial coefficients of wavelength solution
        order: int
            Order of polynomial wavelength solution

        Notes
        -----
        Populates the attribute interp_arr in the PSFLets class
        '''
        self.interp_arr = np.zeros((order + 1, allcoef.shape[1]))
        self.order = order

        xarr = np.ones((lam.shape[0], order + 1))
        for i in range(1, order + 1):
            xarr[:, i] = np.log(lam)**i

        for i in range(self.interp_arr.shape[1]):
            coef = np.linalg.lstsq(xarr, allcoef[:, i], rcond=None)[0]
            self.interp_arr[:, i] = coef

    def return_locations_short(self, coef, xindx, yindx):
        '''
        Returns the x,y detector location of a given lenslet for a
        given set of coefficients.

        Parameters
        ----------
        coef: lists floats
            Polynomial coefficients of the fit for a single wavelength
        xindx: int
            X index of lenslet in lenslet array
        yindx: int
            Y index of lenslet in lenslet array

        Returns
        -------
        interp_x: float
            X coordinate on the detector
        interp_y: float
            Y coordinate on the detector
        '''
        coeforder = int(np.sqrt(coef.shape[0])) - 1
        interp_x, interp_y = _transform(xindx, yindx, coeforder, coef)
        return interp_x, interp_y

    def return_locations(self, lam, allcoef, xindx, yindx, order=3):
        '''
        Calculates the detector coordinates of the lenslet located at
        `xindx`, `yindx` for the desired wavelength `lam`

        Parameters
        ----------
        lam: float
            Wavelength in nm
        allcoef: list of floats
            List describing the polynomial coefficients that best fit
            the lenslets, for all wavelengths
        xindx: int
            X index of lenslet in lenslet array
        yindx: int
            Y index of lenslet in lenslet array
        order: int
            Order of the polynomial fit

        Returns
        -------
        interp_x: float
            X coordinate on the detector
        interp_y: float
            Y coordinate on the detector
        '''
        if len(allcoef.shape) == 1:
            coeforder = int(np.sqrt(allcoef.shape[0])) - 1
            interp_x, interp_y = _transform(xindx, yindx, coeforder, allcoef)
            return interp_x, interp_y

        if self.interp_arr is None:
            self.geninterparray(lam, allcoef, order=order)

        coeforder = int(np.sqrt(allcoef.shape[1])) - 1
        if not (coeforder + 1) * (coeforder + 2) == allcoef.shape[1]:
            raise ValueError("Number of coefficients incorrect for polynomial order.")

        coef = np.zeros((coeforder + 1) * (coeforder + 2))
        for k in range(self.order + 1):
            coef += self.interp_arr[k] * np.log(lam)**k
        interp_x, interp_y = _transform(xindx, yindx, coeforder, coef)
        return interp_x, interp_y
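    # -----------------------------------------------------------------
    # Illustrative sketch of the smoothing done by geninterparray:
    # each wavelength-solution coefficient c_j(lam) is fit by least
    # squares as a polynomial in log(lam),
    #
    #     c_j(lam) ~ sum_k interp_arr[k, j] * log(lam)**k,
    #
    # so evaluating the fit at a new lam interpolates the solution.
    # The toy coefficient below is an assumption for exposition:
    #
    #     lam = np.linspace(1150., 2400., 20)
    #     cj = 2.0 + 0.5*np.log(lam) - 0.1*np.log(lam)**2   # fake coefficient
    #     xarr = np.array([np.log(lam)**k for k in range(4)]).T
    #     fit = np.linalg.lstsq(xarr, cj, rcond=None)[0]
    #     cj_1600 = sum(fit[k]*np.log(1600.)**k for k in range(4))
    # -----------------------------------------------------------------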
    def return_res(self, lam, allcoef, xindx, yindx, order=3, lam1=None, lam2=None):
        '''
        Returns the spectral resolution and interpolated wavelength array

        Parameters
        ----------
        lam: float
            Wavelength in nm
        allcoef: list of floats
            Polynomial coefficients of wavelength solution
        xindx: int
            X index of lenslet in lenslet array
        yindx: int
            Y index of lenslet in lenslet array
        order: int
            Order of polynomial wavelength solution
        lam1: float
            Shortest wavelength in nm
        lam2: float
            Longest wavelength in nm

        Returns
        -------
        interp_lam: array
            Array of wavelengths
        R: ndarray of floats
            Effective spectral resolution
        '''
        if lam1 is None:
            lam1 = np.amin(lam) / 1.04
        if lam2 is None:
            lam2 = np.amax(lam) * 1.03

        interporder = order
        if self.interp_arr is None:
            self.geninterparray(lam, allcoef, order=order)

        coeforder = int(np.sqrt(allcoef.shape[1])) - 1
        n_spline = 100
        interp_lam = np.linspace(lam1, lam2, n_spline)
        dy = []
        dx = []
        for i in range(n_spline):
            coef = np.zeros((coeforder + 1) * (coeforder + 2))
            for k in range(1, interporder + 1):
                coef += k * self.interp_arr[k] * np.log(interp_lam[i])**(k - 1)
            _dx, _dy = _transform(xindx, yindx, coeforder, coef)
            dx += [_dx]
            dy += [_dy]

        R = np.sqrt(np.asarray(dy)**2 + np.asarray(dx)**2)
        return interp_lam, R

    def monochrome_coef(self, lam, alllam=None, allcoef=None, order=3):
        if self.interp_arr is None:
            if alllam is None or allcoef is None:
                raise ValueError("Interpolation array has not been computed.  "
                                 "Must call monochrome_coef with arrays.")
            self.geninterparray(alllam, allcoef, order=order)

        coef = np.zeros(self.interp_arr[0].shape)
        for k in range(self.order + 1):
            coef += self.interp_arr[k] * np.log(lam)**k
        return coef
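    # -----------------------------------------------------------------
    # Note on return_res (illustrative, assumed numbers): R is
    # |d(x, y)/d ln(lam)| in pixels.  Since d ln(lam) = dlam/lam, a
    # one-pixel step along the microspectrum spans dlam = lam/R, so R
    # acts as the per-pixel spectral resolution lam/dlam:
    #
    #     b_x, b_y = 2.0, 29.0            # d(x)/dln(lam), d(y)/dln(lam)
    #     R = np.sqrt(b_x**2 + b_y**2)    # ~29.07
    #     dlam = 1550. / R                # ~53 nm per pixel at 1550 nm
    # -----------------------------------------------------------------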
    def genpixsol(self, lam, allcoef, order=3, lam1=None, lam2=None):
        """
        Calculates the wavelength at the center of each pixel within a
        microspectrum

        Parameters
        ----------
        lam: float
            Wavelength in nm
        allcoef: list of floats
            List describing the polynomial coefficients that best fit
            the lenslets, for all wavelengths
        order: int
            Order of the polynomial fit
        lam1: float
            Lowest wavelength in nm
        lam2: float
            Highest wavelength in nm

        Notes
        -----
        This function fills in most of the fields of the PSFLets
        class: the array of xindx, yindx, nlam, lam_indx and nlam_max
        """

        ###################################################################
        # Read in wavelengths of spots, coefficients of wavelength
        # solution.  Obtain extrapolated limits of the wavelength
        # solution to 4% below and 3% above the limits of the fit by
        # default.
        ###################################################################

        if lam1 is None:
            lam1 = np.amin(lam) / 1.04
        if lam2 is None:
            lam2 = np.amax(lam) * 1.03

        interporder = order
        if self.interp_arr is None:
            self.geninterparray(lam, allcoef, order=order)

        coeforder = int(np.sqrt(allcoef.shape[1])) - 1
        if not (coeforder + 1) * (coeforder + 2) == allcoef.shape[1]:
            raise ValueError("Number of coefficients incorrect for polynomial order.")

        xindx = np.arange(-100, 101)
        xindx, yindx = np.meshgrid(xindx, xindx)

        n_spline = 100
        interp_x = np.zeros(tuple([n_spline] + list(xindx.shape)))
        interp_y = np.zeros(interp_x.shape)
        interp_lam = np.linspace(lam1, lam2, n_spline)

        for i in range(n_spline):
            coef = np.zeros((coeforder + 1) * (coeforder + 2))
            for k in range(interporder + 1):
                coef += self.interp_arr[k] * np.log(interp_lam[i])**k
            interp_x[i], interp_y[i] = _transform(xindx, yindx, coeforder, coef)

        x = np.zeros(tuple(list(xindx.shape) + [1000]))
        y = np.zeros(x.shape)
        nlam = np.zeros(xindx.shape, int)
        lam_out = np.zeros(y.shape)
        good = np.zeros(xindx.shape)

        for ix in range(xindx.shape[0]):
            for iy in range(xindx.shape[1]):
                pix_x = interp_x[:, ix, iy]
                pix_y = interp_y[:, ix, iy]
                if np.all(pix_x < 0) or np.all(pix_x > 2048) or \
                   np.all(pix_y < 0) or np.all(pix_y > 2048):
                    continue

                if pix_y[-1] < pix_y[0]:
                    try:
                        tck_y = interpolate.splrep(pix_y[::-1], interp_lam[::-1], k=1, s=0)
                    except:
                        print(pix_x, pix_y)
                        raise
                else:
                    tck_y = interpolate.splrep(pix_y, interp_lam, k=1, s=0)

                y1, y2 = [int(np.amin(pix_y)) + 1, int(np.amax(pix_y))]
                tck_x = interpolate.splrep(interp_lam, pix_x, k=1, s=0)

                nlam[ix, iy] = y2 - y1 + 1
                y[ix, iy, :nlam[ix, iy]] = np.arange(y1, y2 + 1)
                lam_out[ix, iy, :nlam[ix, iy]] = interpolate.splev(y[ix, iy, :nlam[ix, iy]], tck_y)
                x[ix, iy, :nlam[ix, iy]] = interpolate.splev(lam_out[ix, iy, :nlam[ix, iy]], tck_x)

        for nlam_max in range(x.shape[-1]):
            if np.all(y[:, :, nlam_max] == 0):
                break

        self.xindx = x[:, :, :nlam_max]
        self.yindx = y[:, :, :nlam_max]
        self.nlam = nlam
        self.lam_indx = lam_out[:, :, :nlam_max]
        self.nlam_max = np.amax(nlam)
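def _demo_invert_trace():
    """
    Illustrative sketch only (not called by the pipeline): the
    per-lenslet inversion used by PSFLets.genpixsol.  Given a
    monotonic trace y(lam), a linear spline of the swapped pair
    (y, lam) evaluates the wavelength at integer pixel rows; a second
    spline x(lam) then gives the x position at those rows.  The toy
    trace below is an assumption for exposition.
    """
    lam = np.linspace(1150., 2400., 100)
    pix_y = 900. + 30. * np.log(lam / lam[0])    # monotonic toy trace
    pix_x = 1020. + 0.05 * (lam - lam[0])

    tck_y = interpolate.splrep(pix_y, lam, k=1, s=0)
    y1, y2 = int(np.amin(pix_y)) + 1, int(np.amax(pix_y))
    rows = np.arange(y1, y2 + 1)
    lam_rows = interpolate.splev(rows, tck_y)    # wavelength at each row

    tck_x = interpolate.splrep(lam, pix_x, k=1, s=0)
    x_rows = interpolate.splev(lam_rows, tck_x)  # x position at each row
    return rows, lam_rows, x_rows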
def _initcoef(order, scale=15.02, phi=np.arctan2(1.926, -1), x0=0, y0=0):
    """
    Private function _initcoef in locate_psflets

    Create a set of coefficients including a rotation matrix plus zeros.

    Parameters
    ----------
    order: int
        The polynomial order of the grid distortion
    scale: float
        The linear separation in pixels of the PSFlets.  Default 15.02.
    phi: float
        The pitch angle of the lenslets.  Default atan(1.926)
    x0: float
        x offset to apply to the central pixel.  Default 0
    y0: float
        y offset to apply to the central pixel.  Default 0

    Returns
    -------
    coef: list of floats
        A list of length (order+1)*(order+2) to be optimized.

    Notes
    -----
    The list of coefficients has space for a polynomial fit of the
    input order (i.e., for order 3, up to terms like x**3 and x**2*y,
    but not x**3*y).  It is all zeros in the output apart from the
    rotation matrix given by scale and phi.
    """

    try:
        if not order == int(order):
            raise ValueError("Polynomial order must be integer")
        else:
            if order < 1 or order > 5:
                raise ValueError("Polynomial order must be >0, <=5")
    except:
        raise ValueError("Polynomial order must be integer")

    n = (order + 1) * (order + 2)
    coef = np.zeros((n))

    # Use integer division for the indices: n is always even, and a
    # float index is a TypeError in Python 3.
    coef[0] = x0
    coef[1] = scale * np.cos(phi)
    coef[order + 1] = -scale * np.sin(phi)
    coef[n // 2] = y0
    coef[n // 2 + 1] = scale * np.sin(phi)
    coef[n // 2 + order + 1] = scale * np.cos(phi)

    return list(coef)


def _pullorder(coef, order=1):
    coeforder = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)
    coef_short = []

    # The coefficient list holds the x terms followed by the y terms,
    # so the triangle of (ix, iy) pairs is traversed twice.
    i = 0
    for ix in range(coeforder + 1):
        for iy in range(coeforder - ix + 1):
            if ix + iy <= order:
                coef_short += [coef[i]]
            i += 1
    for ix in range(coeforder + 1):
        for iy in range(coeforder - ix + 1):
            if ix + iy <= order:
                coef_short += [coef[i]]
            i += 1

    return coef_short


def _insertorder(coefshort, coef):
    coeforder = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)
    shortorder = int(np.sqrt(len(coefshort) + 0.25) - 1.5 + 1e-12)

    # Inverse of _pullorder: write the low-order terms back into the
    # full-length coefficient list, x terms then y terms.
    i = 0
    j = 0
    for ix in range(coeforder + 1):
        for iy in range(coeforder - ix + 1):
            if ix + iy <= shortorder:
                coef[i] = coefshort[j]
                j += 1
            i += 1
    for ix in range(coeforder + 1):
        for iy in range(coeforder - ix + 1):
            if ix + iy <= shortorder:
                coef[i] = coefshort[j]
                j += 1
            i += 1

    return coef
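def _demo_coef_layout():
    """
    Illustrative sketch only (not called by the pipeline): checks the
    coefficient layout produced by _initcoef against _transform.  In
    _transform's ordering, index 1 multiplies y and index order+1
    multiplies x, so lenslet (0, 1) should land at
    (x0 + scale*cos(phi), y0 + scale*sin(phi)).  Also checks that
    _insertorder restores what _pullorder extracted.  The offsets
    below are arbitrary test values.
    """
    scale, phi = 15.02, np.arctan2(1.926, -1)
    coef = _initcoef(3, scale=scale, phi=phi, x0=7., y0=-4.)

    _x, _y = _transform(np.array([0.]), np.array([1.]), 3, coef)
    assert np.allclose(_x, 7. + scale * np.cos(phi))
    assert np.allclose(_y, -4. + scale * np.sin(phi))

    short = _pullorder(coef, 1)                  # 6 low-order terms
    restored = _insertorder(short, list(coef))   # written back in place
    assert np.allclose(restored, coef)
    return coef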
def _transform(x, y, order, coef, highordercoef=None):
    """
    Private function _transform in locate_psflets

    Apply the coefficients given to transform the coordinates using
    a polynomial.

    Parameters
    ----------
    x: ndarray
        Rectilinear grid
    y: ndarray of floats
        Rectilinear grid
    order: int
        Order of the polynomial fit
    coef: list of floats
        List of the coefficients.  Must match the length required by
        order = (order+1)*(order+2)
    highordercoef: Boolean

    Returns
    -------
    _x: ndarray
        Transformed coordinates
    _y: ndarray
        Transformed coordinates
    """

    try:
        if not len(coef) == (order + 1) * (order + 2):
            pass
            # raise ValueError("Number of coefficients incorrect for polynomial order.")
    except:
        raise AttributeError("order must be integer, coef should be a list.")

    try:
        if not order == int(order):
            raise ValueError("Polynomial order must be integer")
        else:
            if order < 1 or order > 5:
                raise ValueError("Polynomial order must be >0, <=5")
    except:
        raise ValueError("Polynomial order must be integer")

    # n**2 + 3*n + 2 = (n + 1.5)**2 - 0.25
    #                = (1/4)*((2*n + 3)**2 - 1) = len(coef)
    order1 = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)

    _x = np.zeros(np.asarray(x).shape)
    _y = np.zeros(np.asarray(y).shape)

    i = 0
    for ix in range(order1 + 1):
        for iy in range(order1 - ix + 1):
            _x += coef[i] * x**ix * y**iy
            i += 1
    for ix in range(order1 + 1):
        for iy in range(order1 - ix + 1):
            _y += coef[i] * x**ix * y**iy
            i += 1

    if highordercoef is None:
        return [_x, _y]

    # Add the high-order terms stored in highordercoef, the
    # full-length coefficient list; the low-order terms were already
    # applied above from coef.  The index i must also advance over the
    # skipped low-order entries.  (As previously written, the inner
    # loops ran over range(order1 - ix + 1), so the ix + iy <= order1
    # test always fired and this branch was a no-op; the loops below
    # follow the evident intent.)
    order2 = int(np.sqrt(len(highordercoef) + 0.25) - 1.5 + 1e-12)
    i = 0
    for ix in range(order2 + 1):
        for iy in range(order2 - ix + 1):
            if ix + iy > order1:
                _x += highordercoef[i] * x**ix * y**iy
            i += 1
    for ix in range(order2 + 1):
        for iy in range(order2 - ix + 1):
            if ix + iy > order1:
                _y += highordercoef[i] * x**ix * y**iy
            i += 1

    return [_x, _y]


def _corrval(coef, x, y, filtered, order, trimfrac=0.1, highordercoef=None):
    """
    Private function _corrval in locate_psflets

    Return the negative of the sum of the middle XX% of the PSFlet
    spot fluxes (disregarding those with the most and the least flux
    to limit the impact of outliers).  Analogous to the trimmed mean.

    Parameters
    ----------
    coef: list of floats
        coefficients for the polynomial transformation
    x: ndarray
        coordinates of lenslets
    y: ndarray
        coordinates of lenslets
    filtered: ndarray
        image convolved with gaussian PSFlet
    order: int
        order of the polynomial fit
    trimfrac: float
        fraction of outliers (high & low combined) to trim
        Default 0.1 (5% trimmed on the high end, 5% on the low end)
    highordercoef: boolean

    Returns
    -------
    score: float
        Negative sum of PSFlet fluxes, to be minimized
    """

    #################################################################
    # Use np.nan for lenslet coordinates outside the CHARIS FOV,
    # discard these from the calculation before trimming.
    #################################################################

    _x, _y = _transform(x, y, order, coef, highordercoef)
    vals = ndimage.map_coordinates(filtered, [_y, _x], mode='constant',
                                   cval=np.nan, prefilter=False)
    vals_ok = vals[np.where(np.isfinite(vals))]

    iclip = int(vals_ok.shape[0] * trimfrac / 2)
    vals_sorted = np.sort(vals_ok)
    score = -1 * np.sum(vals_sorted[iclip:-iclip])
    return score
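def _demo_trimmed_sum():
    """
    Illustrative sketch only (not called by the pipeline): the
    registration metric minimized by _corrval is the negative trimmed
    sum of interpolated image values at the predicted spot positions.
    The toy values below (one low and one high outlier) are
    assumptions for exposition.
    """
    vals = np.array([0.1, 5., 5.2, 4.9, 5.1, 120.])
    trimfrac = 0.4                              # trim 20% on each end
    iclip = int(vals.shape[0] * trimfrac / 2)   # -> 1 value per end
    score = -1 * np.sum(np.sort(vals)[iclip:-iclip])
    return score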
################################################################### if lam1 is None: lam1 = np.amin(lam) / 1.04 if", "order must be integer\") n = (order + 1) * (order + 2)", "method='Powell', options={'xtol': 1e-5, 'ftol': 1e-5}) coef_opt = res.x if not res.success: log.info(\"Optimizing PSFlet", "coefshort[j] j += 1 i += 1 return coef def _transform(x, y, order,", "the most and the least flux to limit the impact of outliers). Analogous", "sig: float standard deviation of convolving Gaussian used for estimating the grid of", "* sig + 1), int(3 * sig + 1) + 1) x, y", "x = np.zeros(tuple(list(xindx.shape) + [1000])) y = np.zeros(x.shape) nlam = np.zeros(xindx.shape, np.int) lam_out", "interpolated wavelength array Parameters ---------- lam: float Wavelength in nm allcoef: list of", "n**2 + 3*n + 2 = (n + 1.5)**2 - 0.25 # =", "for polynomial order.\") xindx = np.arange(-100, 101) xindx, yindx = np.meshgrid(xindx, xindx) n_spline", "with the most and the least flux to limit the impact of outliers).", "ix + 1): if ix + iy <= order: coef_short += [coef[i]] i", "pitch angle and scale. A loop then does a quick check to find", "= signal.convolve2d(inImage.data, gaussian, mode='same') else: unfiltered = signal.convolve2d(inImage.data * inImage.ivar, gaussian, mode='same') unfiltered", "coefficients that best fit the lenslets, for all wavelengths order: int Order of", "Calculates the detector coordinates of lenslet located at `xindx`, `yindx` for desired wavelength", "sig**2)) if inImage.ivar is None: unfiltered = signal.convolve2d(inImage.data, gaussian, mode='same') else: unfiltered =", "PSFlet location transformation coefficients\") init = True else: ix_arr = np.arange(-3.0, 3.05, 0.2)", "separation in pixels of the PSFlets. Default 15.02. phi: float The pitch angle", "order < 1 or order > 5: raise ValueError(\"Polynomial order must be >0,", "+ 2) == allcoef.shape[1]: raise ValueError(\"Number of coefficients incorrect for polynomial order.\") coef", "to save pixel solution to directory \" + outdir + \". Directory does", "else: if order < 1 or order > 5: raise ValueError(\"Polynomial order must", "of floats List of the coefficients. Must match the length required by order", "locatePSFlets(inImage, polyorder=2, sig=0.7, coef=None, trimfrac=0.1, phi=np.arctan2(1.926, -1), scale=15.02, fitorder=None): \"\"\" function locatePSFlets takes", "a 2D ndarray with the X position of all lenslets 2. a 2D", "good = np.zeros(xindx.shape) for ix in range(xindx.shape[0]): for iy in range(xindx.shape[1]): pix_x =", "coef_lin, args=(x, y, filtered, polyorder, trimfrac, coef_opt), method='Powell', options={'xtol': 1e-5, 'ftol': 1e-5}) coef_lin", "'ftol': 1e-5}) coef_lin = res.x coef_opt = _insertorder(coef_lin, coef_opt) else: res = optimize.minimize(_corrval,", "(2 * sig**2)) if inImage.ivar is None: unfiltered = signal.convolve2d(inImage.data, gaussian, mode='same') else:", "coef_save = list(coef[:]) log.info(\"Initializing transformation coefficients with previous values\") init = False bestval", "the PSFLet class: the array of xindx, yindx, nlam, lam_indx and nlam_max \"\"\"", "This is sufficient to cover the detector for the fiducial lenslet spacing. \"\"\"", "offset to apply to the central pixel. Default 0 Returns ------- coef: list", "been computed. Must call monochrome_coef with arrays.\") self.geninterparray(alllam, allcoef, order=order) coef = np.zeros(self.interp_arr[0].shape)", "rotation matrix given by scale and phi. 
\"\"\" try: if not order ==", "2)) for k in range(self.order + 1): coef += self.interp_arr[k] * np.log(lam)**k interp_x,", "Y index of lenslet in lenslet array Returns ------- interp_x: float X coordinate", "be optimized. Notes ----- The list of coefficients has space for a polynomial", "+ 1): _y += coef[i] * x**ix * y**iy i += 1 if", "the detector. Does most of the heavy lifting during the wavelength calibration step.", "int Y index of lenslet in lenslet array Returns ------- interp_x: float X", "2D ndarray Estimated spot centroids in x. y: 2D ndarray Estimated spot centroids", "= (1/4)*((2*n + 3)**2 - 1) = len(coef) order1 = int(np.sqrt(len(coef) + 0.25)", "1e-12) i = 0 j = 0 for ix in range(coeforder + 1):", "y: ndarray coordinates of lenslets filtered: ndarray image convolved with gaussian PSFlet order:", "Gaussian, centroid the filtered image. ############################################################# x = np.arange(-1 * int(3 * sig", "load an already-existing wavelength calibration file infile: String If load is True, this", "+ inImage.filename) _x, _y = _transform(x, y, polyorder, coef_opt) ############################################################# # Boolean: do", "solution. These will be used to smooth/interpolate the wavelength solution, and ultimately to", "y offset to apply to the central pixel. Default 0 Returns ------- coef:", "of PSFlet fluxes, to be minimized \"\"\" ################################################################# # Use np.nan for lenslet", "\" + outdir + \". Directory does not exist.\") outfile = re.sub('//', '/',", "np.ones((lam.shape[0], order + 1)) for i in range(1, order + 1): xarr[:, i]", "order: int Order of the polynomial fit coef: list of floats List of", "calculation before trimming. ################################################################# _x, _y = _transform(x, y, order, coef, highordercoef) vals", "for a single wavelength xindx: int X index of lenslet in lenslet array", "float x offset to apply to the central pixel. Default 0 y0: float", "ydim / 2. - subshape, scale=scale, phi=phi) else: coef = copy.deepcopy(coef_save) coef[0] +=", "wavelength `lam` Parameters ---------- lam: float Wavelength in nm allcoef: list of floats", "* y**iy i += 1 for ix in range(order2 + 1): for iy", "those with the most and the least flux to limit the impact of", "+ 1) * (coeforder + 2)) for k in range(1, interporder + 1):", "range(self.order + 1): coef += self.interp_arr[k] * np.log(lam)**k return coef def return_locations(self, lam,", "15.02. phi: float The pitch angle of the lenslets. Default atan(1.926) x0: float", "_transform(xindx, yindx, coeforder, coef) return interp_x, interp_y def genpixsol(self, lam, allcoef, order=3, lam1=None,", "outside the CHARIS FOV, # discard these from the calculation before trimming. #################################################################", "file Parameters ---------- infile: String Name of the file infiledir: String Directory in", "monochromatic grid of spots polyorder: float order of the polynomial coordinate transformation. 
    savepixsol(self, outdir="calibrations/")

        Saves the wavelength calibration file.

        Parameters
        ----------
        outdir: String
            Directory in which to put the file.

        The file is named PSFloc.fits and is a multi-extension FITS file, each extension
        corresponding to:
        0. the list of wavelengths at which the calibration is done
        1. a 2D ndarray with the X position of all lenslets
        2. a 2D ndarray with the Y position of all lenslets
        3. a 2D ndarray with the number of valid wavelengths for a given lenslet
           (some wavelengths fall outside of the detector area)

        If outdir does not exist, an IOError is raised ("Attempting to save pixel solution to
        directory <outdir>. Directory does not exist."). Otherwise HDUs built from self.xindx,
        self.yindx, self.lam_indx and self.nlam are written to <outdir>/PSFloc.fits with
        overwrite=True.

    geninterparray(self, lam, allcoef, order=3)

        Set up the array used to solve for best-fit polynomial fits to the coefficients of
        the wavelength solution. These will be used to smooth/interpolate the wavelength
        solution, and ultimately to compute its inverse.

        Parameters
        ----------
        lam:     ndarray of floats
            Wavelengths in nm at which the coefficients were fit
        allcoef: list of lists of floats
            Polynomial coefficients of the wavelength solution, one list per wavelength
        order:   int
            Order of the polynomial wavelength solution

        Notes
        -----
        Populates the attribute interp_arr of the PSFLets class. Each distortion coefficient
        is fit as a polynomial in log(lam) of the given order with np.linalg.lstsq;
        interp_arr has shape (order + 1, allcoef.shape[1]).
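The fit performed by geninterparray() can be written as a standalone function. This is a
minimal sketch of the same least-squares step; the function name is ours, and rcond=None is
added only to match current NumPy behaviour:

    import numpy as np

    def fit_coef_vs_loglam(lam, allcoef, order=3):
        # Fit each distortion coefficient as a polynomial in log(lam).
        # interp_arr[k, i] is the coefficient of log(lam)**k for distortion
        # coefficient i (cf. PSFLets.geninterparray).
        lam = np.asarray(lam)
        allcoef = np.asarray(allcoef)                 # shape (n_wavelengths, n_coefficients)
        xarr = np.ones((lam.shape[0], order + 1))
        for i in range(1, order + 1):
            xarr[:, i] = np.log(lam)**i
        interp_arr = np.zeros((order + 1, allcoef.shape[1]))
        for i in range(allcoef.shape[1]):
            interp_arr[:, i] = np.linalg.lstsq(xarr, allcoef[:, i], rcond=None)[0]
        return interp_arr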
    monochrome_coef(self, lam, alllam=None, allcoef=None, order=3)

        Returns the interpolated coefficient vector at a single wavelength. If interp_arr has
        not yet been computed, alllam and allcoef must be supplied (otherwise a ValueError is
        raised: "Interpolation array has not been computed. Must call monochrome_coef with
        arrays.") and geninterparray is called first. The coefficients are then evaluated as
        coef = sum_k interp_arr[k] * log(lam)**k and returned.

    return_locations_short(self, coef, xindx, yindx)

        Returns the x,y detector location of a given lenslet for a given polynomial fit.

        Parameters
        ----------
        coef:  list of floats
            Polynomial coefficients of the fit for a single wavelength
        xindx: int
            X index of lenslet in the lenslet array
        yindx: int
            Y index of lenslet in the lenslet array

        Returns
        -------
        interp_x: float
            X coordinate on the detector
        interp_y: float
            Y coordinate on the detector

        The polynomial order is inferred from the length of coef, and the positions are
        computed with _transform(xindx, yindx, coeforder, coef).

    return_locations(self, lam, allcoef, xindx, yindx, order=3)

        Calculates the detector coordinates of the lenslet located at (xindx, yindx) for the
        desired wavelength lam.

        Parameters
        ----------
        lam:     float
            Wavelength in nm
        allcoef: list of floats
            List describing the polynomial coefficients that best fit the lenslets, for all
            wavelengths
        xindx:   int
            X index of lenslet in the lenslet array
        yindx:   int
            Y index of lenslet in the lenslet array
        order:   int
            Order of the polynomial wavelength solution

        Returns
        -------
        interp_x: float
            X coordinate on the detector
        interp_y: float
            Y coordinate on the detector

        If allcoef is one-dimensional it is treated as a single-wavelength coefficient list
        and passed directly to _transform. Otherwise geninterparray is called if needed, the
        number of coefficients is checked against (coeforder + 1) * (coeforder + 2) (raising
        "Number of coefficients incorrect for polynomial order." on a mismatch), the
        coefficients are interpolated in log(lam) as in monochrome_coef, and _transform gives
        the detector position.
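A compact sketch of the log-wavelength evaluation used by monochrome_coef() and
return_locations(); interp_arr is the array produced by geninterparray() or by the
fit_coef_vs_loglam sketch above, and the helper name is ours:

    import numpy as np

    def coef_at_wavelength(interp_arr, lam):
        # Evaluate the distortion coefficients at one wavelength: row k of
        # interp_arr multiplies log(lam)**k (cf. PSFLets.monochrome_coef).
        coef = np.zeros(interp_arr.shape[1])
        for k in range(interp_arr.shape[0]):
            coef += interp_arr[k] * np.log(lam)**k
        return coef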
    genpixsol(self, lam, allcoef, order=3, lam1=None, lam2=None)

        Calculates the wavelength at the center of each pixel within a microspectrum.

        Parameters
        ----------
        lam:     ndarray of floats
            Wavelengths in nm at which the calibration was done
        allcoef: list of floats
            List describing the polynomial coefficients that best fit the lenslets, for all
            wavelengths
        order:   int
            Order of the polynomial fit
        lam1:    float
            Lowest wavelength in nm
        lam2:    float
            Highest wavelength in nm

        Notes
        -----
        This function fills in most of the fields of the PSFLets class: the arrays xindx,
        yindx, nlam, lam_indx and nlam_max.

        The coefficients of the wavelength solution are read in and, by default, the
        wavelength limits are extrapolated to 4% below and 3% above the limits of the
        coefficient file (lam1 = np.amin(lam)/1.04, lam2 = np.amax(lam)*1.03). After checking
        that the number of coefficients matches (coeforder + 1) * (coeforder + 2), the lenslet
        grid (np.arange(-100, 101) in both directions) is mapped to the detector with
        _transform on a grid of 100 wavelengths between lam1 and lam2. For every lenslet whose
        trace falls on the detector (0 to 2048 in both x and y), linear splines
        (interpolate.splrep with k=1, s=0) invert detector row into wavelength and evaluate
        the column at each integer row; the results populate xindx, yindx, lam_indx and nlam,
        and nlam_max is set to np.amax(nlam).

    return_res(self, lam, allcoef, xindx, yindx, order=3, lam1=None, lam2=None)

        Returns the spectral resolution and interpolated wavelength array.

        Parameters
        ----------
        lam:     float
            Wavelength in nm
        allcoef: list of floats
            Polynomial coefficients of the wavelength solution
        xindx:   int
            X index of lenslet in the lenslet array
        yindx:   int
            Y index of lenslet in the lenslet array
        order:   int
            Order of the polynomial wavelength solution
        lam1:    float
            Shortest wavelength in nm
        lam2:    float
            Longest wavelength in nm

        Returns
        -------
        interp_lam: array
            Array of wavelengths
        R:          float
            Effective spectral resolution

        The wavelength limits default to the same 4%/3% extrapolation as genpixsol. The
        derivative of the coefficient polynomial with respect to log(lam) is evaluated on 100
        wavelengths, converted to detector displacements with _transform, and R is computed
        as sqrt(dx**2 + dy**2).
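The per-lenslet inversion inside genpixsol() can be illustrated in isolation. This is a
minimal sketch for a single lenslet, assuming its centroid track (pix_x, pix_y) has already
been evaluated on the wavelength grid interp_lam and is monotonic in pix_y; the function name
is ours:

    import numpy as np
    from scipy import interpolate

    def lenslet_wavelengths(interp_lam, pix_x, pix_y):
        # Invert one lenslet's trace: detector row -> wavelength -> column,
        # using linear splines as in PSFLets.genpixsol.
        if pix_y[-1] < pix_y[0]:                       # trace runs backwards in y
            tck_y = interpolate.splrep(pix_y[::-1], interp_lam[::-1], k=1, s=0)
        else:
            tck_y = interpolate.splrep(pix_y, interp_lam, k=1, s=0)
        tck_x = interpolate.splrep(interp_lam, pix_x, k=1, s=0)

        y1, y2 = int(np.amin(pix_y)) + 1, int(np.amax(pix_y))
        rows = np.arange(y1, y2 + 1)                   # integer rows covered by the trace
        lam_of_row = interpolate.splev(rows, tck_y)    # wavelength at each integer row
        col_of_row = interpolate.splev(lam_of_row, tck_x)
        return rows, lam_of_row, col_of_row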
The module also defines several private helper functions used by the class and by
locatePSFlets.

_initcoef(order, scale=15.02, phi=np.arctan2(1.926, -1), x0=0, y0=0)

    Create a set of coefficients including a rotation matrix plus zeros.

    Parameters
    ----------
    order: int
        The polynomial order of the grid distortion
    scale: float
        The linear separation in pixels of the PSFlets. Default 15.02.
    phi:   float
        The pitch angle of the lenslets. Default np.arctan2(1.926, -1).
    x0:    float
        x offset to apply to the central pixel. Default 0.
    y0:    float
        y offset to apply to the central pixel. Default 0.

    Returns
    -------
    coef: list of floats
        A list of length (order + 1) * (order + 2) to be optimized.

    Notes
    -----
    The list of coefficients has space for a polynomial fit of the input order (i.e., for
    order 3, up to terms like x**3 and x**2*y, but not x**3*y). It is all zeros in the output
    apart from the rotation matrix given by scale and phi. The order must be an integer
    between 1 and 5; otherwise a ValueError is raised.

_pullorder(coef, order=1)

    Extracts from a full coefficient list the subset of terms with combined power
    ix + iy <= order, preserving the ordering used by _transform. Used to optimize only the
    low-order (e.g. linear) part of the transformation.

_insertorder(coefshort, coef)

    The inverse of _pullorder: writes the optimized low-order terms in coefshort back into
    their positions in the full coefficient list coef and returns it.
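A sketch mirroring the seed coefficients built by _initcoef(); the function name is ours, and
integer division is written explicitly (the behaviour assumed of the original n / 2):

    import numpy as np

    def init_rotation_coef(order, scale=15.02, phi=np.arctan2(1.926, -1), x0=0.0, y0=0.0):
        # Only the constant terms (x0, y0) and the linear terms (a rotation/scaling
        # matrix built from scale and phi) are non-zero; all higher-order terms
        # start at zero and are left to the optimizer.
        n = (order + 1) * (order + 2)
        coef = np.zeros(n)
        coef[0] = x0
        coef[1] = scale * np.cos(phi)
        coef[order + 1] = -scale * np.sin(phi)
        coef[n // 2] = y0
        coef[n // 2 + 1] = scale * np.sin(phi)
        coef[n // 2 + order + 1] = scale * np.cos(phi)
        return list(coef)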
_transform(x, y, order, coef, highordercoef=None)

    Apply the coefficients given to transform the coordinates using a polynomial.

    Parameters
    ----------
    x:     ndarray
        Rectilinear grid
    y:     ndarray of floats
        Rectilinear grid
    order: int
        Order of the polynomial fit
    coef:  list of floats
        List of the coefficients. Must match the length required by the order,
        (order + 1) * (order + 2).
    highordercoef: list of floats, optional
        Additional higher-order coefficients applied on top of coef.

    Returns
    -------
    _x: ndarray
        Transformed coordinates
    _y: ndarray
        Transformed coordinates

    The base order is recovered from len(coef) using
    (order + 1) * (order + 2) = (1/4) * ((2*order + 3)**2 - 1). The first half of coef builds
    _x and the second half builds _y, with an outer loop over the power of x and an inner
    loop over the power of y (ix + iy <= order). If highordercoef is given, the terms with
    ix + iy above the base order are added in the same way.

_corrval(coef, x, y, filtered, order, trimfrac=0.1, highordercoef=None)

    Return the negative of the sum of the middle XX% of the PSFlet spot fluxes (disregarding
    those with the most and the least flux to limit the impact of outliers). Analogous to the
    trimmed mean.

    Parameters
    ----------
    coef:     list of floats
        Coefficients for the polynomial transformation
    x:        ndarray
        Coordinates of lenslets
    y:        ndarray
        Coordinates of lenslets
    filtered: ndarray
        Image convolved with the Gaussian PSFlet
    order:    int
        Order of the polynomial fit
    trimfrac: float
        Fraction of lenslet outliers (high & low combined) to trim in the minimization.
        Default 0.1 (5% trimmed on the high end, 5% on the low end).
    highordercoef: list of floats, optional
        Higher-order coefficients passed through to _transform.

    Returns
    -------
    score: float
        Negative sum of PSFlet fluxes, to be minimized.

    Lenslet coordinates that land outside the CHARIS field of view evaluate to np.nan
    (ndimage.map_coordinates with mode='constant', cval=np.nan, prefilter=False) and are
    discarded before the trimmed sum is formed.
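A self-contained sketch of these two helpers, under the simplifying assumptions that
highordercoef is unused and the argument checking is omitted; the function names are ours,
and `filtered` is assumed to be the spline-filtered, Gaussian-convolved frame:

    import numpy as np
    from scipy import ndimage

    def poly_transform(x, y, order, coef):
        # 2-D polynomial coordinate transform (cf. _transform): the first half of
        # coef gives x', the second half gives y'; terms come from an outer loop
        # over the power of x and an inner loop over the power of y, ix + iy <= order.
        _x = np.zeros(np.asarray(x).shape)
        _y = np.zeros(np.asarray(y).shape)
        i = 0
        for ix in range(order + 1):
            for iy in range(order - ix + 1):
                _x += coef[i] * x**ix * y**iy
                i += 1
        for ix in range(order + 1):
            for iy in range(order - ix + 1):
                _y += coef[i] * x**ix * y**iy
                i += 1
        return _x, _y

    def trimmed_flux_score(coef, x, y, filtered, order, trimfrac=0.1):
        # Negative trimmed sum of PSFlet fluxes at the transformed positions
        # (cf. _corrval).  Off-image positions become NaN and are discarded
        # before trimming the brightest and faintest trimfrac/2 lenslets.
        _x, _y = poly_transform(x, y, order, coef)
        vals = ndimage.map_coordinates(filtered, [_y, _x], mode='constant',
                                       cval=np.nan, prefilter=False)
        vals_ok = vals[np.isfinite(vals)]
        iclip = int(vals_ok.shape[0] * trimfrac / 2)
        vals_sorted = np.sort(vals_ok)
        return -1 * np.sum(vals_sorted[iclip:-iclip])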
locatePSFlets(inImage, polyorder=2, sig=0.7, coef=None, trimfrac=0.1, phi=np.arctan2(1.926, -1), scale=15.02, fitorder=None)

    locatePSFlets takes an Image class, assumed to be a monochromatic grid of spots with read
    noise and shot noise, and returns the estimated positions of the spot centroids. This is
    designed to constrain the domain of the PSFlet fitting later in the pipeline.

    Parameters
    ----------
    inImage:   Image class
        Assumed to be a monochromatic grid of spots
    polyorder: float
        Order of the polynomial coordinate transformation. Default 2.
    sig:       float
        Standard deviation of the convolving Gaussian used for estimating the grid of
        centroids. Should be close to the true value for the PSFlet spots. Default 0.7.
    coef:      list
        Initial guess of the coefficients of the polynomial coordinate transformation.
    trimfrac:  float
        Fraction of lenslet outliers (high & low combined) to trim in the minimization.
        Default 0.1 (5% trimmed on the high end, 5% on the low end).

    Returns
    -------
    x:    2D ndarray
        Estimated spot centroids in x.
    y:    2D ndarray
        Estimated spot centroids in y.
    good: 2D boolean ndarray
        True for lenslets with spots inside the detector footprint.
    coef: list of floats
        List of best-fit polynomial coefficients.

    Notes
    -----
    The coefficients, if not supplied, are initially set to the known pitch angle and scale.
    A loop then does a quick check to find reasonable offsets in x and y. With all of the
    first-order polynomial coefficients set, the optimizer refines these and the higher-order
    coefficients. This routine seems to be relatively robust down to per-lenslet
    signal-to-noise ratios of order unity (or even a little less). Important note: as of now
    (09/2015), the number of lenslets to grid is hard-coded as 1/10 the dimensionality of the
    final array. This is sufficient to cover the detector for the fiducial lenslet spacing.

    The implementation proceeds in four steps:
    1. Convolve the frame with a Gaussian of width sig (inverse-variance weighted if
       inImage.ivar is available) and spline-filter the result so it can be sampled with
       ndimage.map_coordinates.
    2. Build the grid of lenslet IDs, with lenslet (0, 0) at the center (gridfrac = 20, so
       the grid spans plus or minus ydim // 20 lenslets).
    3. Set up polynomial coefficients, convert from lenslet coordinates to coordinates on the
       detector array, then optimize the coefficients. To start from a decent guess, a grid
       of x/y offsets is tried on a central subimage (this seems to be robust down to
       SNR/PSFlet ~ 1): fresh _initcoef seeds on a coarse offset grid when coef is None, or
       small perturbations of the previous coefficients otherwise (coefficients from the
       previous frame imply the current frame is at a slightly higher wavelength, so offsets
       slightly to the right are tried to get a good initial guess). The best trial is
       refined with scipy.optimize.minimize (Powell), optionally restricted to the low-order
       terms via _pullorder/_insertorder, and a final Powell optimization is then run on the
       full frame. If the optimizer reports failure, a warning is logged for the frame.
    4. Apply _transform with the optimized coefficients to get the centroids _x, _y, and flag
       as good the lenslets whose spots lie more than 5 pixels inside the detector edges.

    def geninterparray(self, lam, allcoef, order=3):
        '''
        Set up the array to solve for best-fit polynomial fits to the
        coefficients of the wavelength solution.  These will be used
        to smooth/interpolate the wavelength solution, and ultimately
        to compute its inverse.

        Parameters
        ----------
        lam: float
            Wavelength in nm
        allcoef: list of lists of floats
            Polynomial coefficients of the wavelength solution
        order: int
            Order of the polynomial wavelength solution

        Notes
        -----
        Populates the attribute interp_arr in the PSFLet class
        '''
        self.interp_arr = np.zeros((order + 1, allcoef.shape[1]))
        self.order = order

        # Design matrix: each distortion coefficient is fit as a
        # polynomial in log(wavelength).
        xarr = np.ones((lam.shape[0], order + 1))
        for i in range(1, order + 1):
            xarr[:, i] = np.log(lam)**i

        for i in range(self.interp_arr.shape[1]):
            coef = np.linalg.lstsq(xarr, allcoef[:, i])[0]
            self.interp_arr[:, i] = coef
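
    # Illustrative sketch, not part of the pipeline API: given monochromatic
    # solutions `allcoef` of shape (n_lam, n_coef) measured at wavelengths
    # `lam` in nm (both hypothetical stand-ins for real calibration data),
    # the smoothed coefficients at an arbitrary wavelength follow from the
    # log-wavelength fit above:
    #
    #     psftool = PSFLets()
    #     psftool.geninterparray(lam, allcoef, order=3)
    #     coef_1200 = psftool.monochrome_coef(1200.)  # coefficients at 1200 nm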

    def return_locations_short(self, coef, xindx, yindx):
        '''
        Returns the x,y detector location of a given lenslet for a
        given polynomial fit

        Parameters
        ----------
        coef: list of floats
            Polynomial coefficients of the fit for a single wavelength
        xindx: int
            X index of lenslet in lenslet array
        yindx: int
            Y index of lenslet in lenslet array

        Returns
        -------
        interp_x: float
            X coordinate on the detector
        interp_y: float
            Y coordinate on the detector
        '''
        coeforder = int(np.sqrt(coef.shape[0])) - 1
        interp_x, interp_y = _transform(xindx, yindx, coeforder, coef)
        return interp_x, interp_y

    def return_res(self, lam, allcoef, xindx, yindx, order=3, lam1=None, lam2=None):
        '''
        Returns the spectral resolution and interpolated wavelength array

        Parameters
        ----------
        lam: float
            Wavelength in nm
        allcoef: list of floats
            List describing the polynomial coefficients that best fit
            the lenslets, for all wavelengths
        xindx: int
            X index of lenslet in lenslet array
        yindx: int
            Y index of lenslet in lenslet array
        order: int
            Order of polynomial wavelength solution
        lam1: float
            Shortest wavelength in nm
        lam2: float
            Longest wavelength in nm

        Returns
        -------
        interp_lam: array
            Array of wavelengths
        R: float
            Effective spectral resolution
        '''
        if lam1 is None:
            lam1 = np.amin(lam) / 1.04
        if lam2 is None:
            lam2 = np.amax(lam) * 1.03

        interporder = order
        if self.interp_arr is None:
            self.geninterparray(lam, allcoef, order=order)

        coeforder = int(np.sqrt(allcoef.shape[1])) - 1
        n_spline = 100
        interp_lam = np.linspace(lam1, lam2, n_spline)
        dy = []
        dx = []
        for i in range(n_spline):
            # Differentiate the smoothed coefficients with respect to
            # log(wavelength).
            coef = np.zeros((coeforder + 1) * (coeforder + 2))
            for k in range(1, interporder + 1):
                coef += k * self.interp_arr[k] * np.log(interp_lam[i])**(k - 1)
            _dx, _dy = _transform(xindx, yindx, coeforder, coef)
            dx += [_dx]
            dy += [_dy]

        R = np.sqrt(np.asarray(dy)**2 + np.asarray(dx)**2)
        return interp_lam, R

    def monochrome_coef(self, lam, alllam=None, allcoef=None, order=3):
        if self.interp_arr is None:
            if alllam is None or allcoef is None:
                raise ValueError("Interpolation array has not been computed.  Must call monochrome_coef with arrays.")
            self.geninterparray(alllam, allcoef, order=order)

        coef = np.zeros(self.interp_arr[0].shape)
        for k in range(self.order + 1):
            coef += self.interp_arr[k] * np.log(lam)**k
        return coef

    def return_locations(self, lam, allcoef, xindx, yindx, order=3):
        '''
        Calculates the detector coordinates of lenslet located at
        `xindx`, `yindx` for desired wavelength `lam`

        Parameters
        ----------
        lam: float
            Wavelength in nm
        allcoef: list of floats
            List describing the polynomial coefficients that best fit
            the lenslets, for all wavelengths
        xindx: int
            X index of lenslet in lenslet array
        yindx: int
            Y index of lenslet in lenslet array
        order: int
            Order of polynomial wavelength solution

        Returns
        -------
        interp_x: float
            X coordinate on the detector
        interp_y: float
            Y coordinate on the detector
        '''
        if len(allcoef.shape) == 1:
            coeforder = int(np.sqrt(allcoef.shape[0])) - 1
            interp_x, interp_y = _transform(xindx, yindx, coeforder, allcoef)
            return interp_x, interp_y

        if self.interp_arr is None:
            self.geninterparray(lam, allcoef, order=order)

        coeforder = int(np.sqrt(allcoef.shape[1])) - 1
        if not (coeforder + 1) * (coeforder + 2) == allcoef.shape[1]:
            raise ValueError("Number of coefficients incorrect for polynomial order.")

        coef = np.zeros((coeforder + 1) * (coeforder + 2))
        for k in range(self.order + 1):
            coef += self.interp_arr[k] * np.log(lam)**k
        interp_x, interp_y = _transform(xindx, yindx, coeforder, coef)
        return interp_x, interp_y
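
    # Illustrative note, a sketch rather than pipeline documentation:
    # return_res differentiates each smoothed coefficient with respect to
    # log(wavelength), so sqrt(dx**2 + dy**2) is the spectral displacement
    # in pixels per unit log(lambda).  That equals lambda/dlambda for a
    # one-pixel step, i.e. the per-pixel spectral resolution:
    #
    #     interp_lam, R = psftool.return_res(lam, allcoef, 0, 0)
    #
    # with `psftool`, `lam` and `allcoef` as in the sketch above.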

    def genpixsol(self, lam, allcoef, order=3, lam1=None, lam2=None):
        """
        Calculates the wavelength at the center of each pixel within a
        microspectrum

        Parameters
        ----------
        lam: float
            Wavelength in nm
        allcoef: list of floats
            List describing the polynomial coefficients that best fit
            the lenslets, for all wavelengths
        order: int
            Order of the polynomial fit
        lam1: float
            Lowest wavelength in nm
        lam2: float
            Highest wavelength in nm

        Notes
        -----
        This function fills in most of the fields of the PSFLet class:
        the arrays xindx, yindx, nlam, lam_indx and nlam_max
        """
        ###################################################################
        # Read in wavelengths of spots, coefficients of wavelength
        # solution.  Obtain extrapolated limits of the wavelength
        # solution to 4% below and 3% above the limits of the
        # coefficient file by default.
        ###################################################################
        if lam1 is None:
            lam1 = np.amin(lam) / 1.04
        if lam2 is None:
            lam2 = np.amax(lam) * 1.03
        interporder = order

        if self.interp_arr is None:
            self.geninterparray(lam, allcoef, order=order)

        coeforder = int(np.sqrt(allcoef.shape[1])) - 1
        if not (coeforder + 1) * (coeforder + 2) == allcoef.shape[1]:
            raise ValueError("Number of coefficients incorrect for polynomial order.")

        xindx = np.arange(-100, 101)
        xindx, yindx = np.meshgrid(xindx, xindx)

        n_spline = 100
        interp_x = np.zeros(tuple([n_spline] + list(xindx.shape)))
        interp_y = np.zeros(interp_x.shape)
        interp_lam = np.linspace(lam1, lam2, n_spline)

        for i in range(n_spline):
            coef = np.zeros((coeforder + 1) * (coeforder + 2))
            for k in range(interporder + 1):
                coef += self.interp_arr[k] * np.log(interp_lam[i])**k
            interp_x[i], interp_y[i] = _transform(xindx, yindx, coeforder, coef)

        x = np.zeros(tuple(list(xindx.shape) + [1000]))
        y = np.zeros(x.shape)
        nlam = np.zeros(xindx.shape, int)
        lam_out = np.zeros(y.shape)
        good = np.zeros(xindx.shape)

        for ix in range(xindx.shape[0]):
            for iy in range(xindx.shape[1]):
                pix_x = interp_x[:, ix, iy]
                pix_y = interp_y[:, ix, iy]
                if np.all(pix_x < 0) or np.all(pix_x > 2048) or \
                   np.all(pix_y < 0) or np.all(pix_y > 2048):
                    continue

                if pix_y[-1] < pix_y[0]:
                    try:
                        tck_y = interpolate.splrep(pix_y[::-1], interp_lam[::-1], k=1, s=0)
                    except:
                        print(pix_x, pix_y)
                        raise
                else:
                    tck_y = interpolate.splrep(pix_y, interp_lam, k=1, s=0)

                y1, y2 = [int(np.amin(pix_y)) + 1, int(np.amax(pix_y))]
                tck_x = interpolate.splrep(interp_lam, pix_x, k=1, s=0)

                nlam[ix, iy] = y2 - y1 + 1
                y[ix, iy, :nlam[ix, iy]] = np.arange(y1, y2 + 1)
                lam_out[ix, iy, :nlam[ix, iy]] = interpolate.splev(y[ix, iy, :nlam[ix, iy]], tck_y)
                x[ix, iy, :nlam[ix, iy]] = interpolate.splev(lam_out[ix, iy, :nlam[ix, iy]], tck_x)

        for nlam_max in range(x.shape[-1]):
            if np.all(y[:, :, nlam_max] == 0):
                break

        self.xindx = x[:, :, :nlam_max]
        self.yindx = y[:, :, :nlam_max]
        self.nlam = nlam
        self.lam_indx = lam_out[:, :, :nlam_max]
        self.nlam_max = np.amax(nlam)


def _initcoef(order, scale=15.02, phi=np.arctan2(1.926, -1), x0=0, y0=0):
    """
    Private function _initcoef in locate_psflets

    Create a set of coefficients including a rotation matrix plus zeros.

    Parameters
    ----------
    order: int
        The polynomial order of the grid distortion
    scale: float
        The linear separation in pixels of the PSFlets.  Default 15.02.
    phi: float
        The pitch angle of the lenslets.  Default atan(1.926)
    x0: float
        x offset to apply to the central pixel.  Default 0
    y0: float
        y offset to apply to the central pixel.  Default 0

    Returns
    -------
    coef: list of floats
        A list of length (order+1)*(order+2) to be optimized.

    Notes
    -----
    The list of coefficients has space for a polynomial fit of the
    input order (i.e., for order 3, up to terms like x**3 and x**2*y,
    but not x**3*y).  It is all zeros in the output apart from the
    rotation matrix given by scale and phi.
    """
    try:
        if not order == int(order):
            raise ValueError("Polynomial order must be integer")
        else:
            if order < 1 or order > 5:
                raise ValueError("Polynomial order must be >0, <=5")
    except:
        raise ValueError("Polynomial order must be integer")

    n = (order + 1) * (order + 2)
    coef = np.zeros((n))

    # The first half of coef holds the x coefficients, the second half
    # the y coefficients; use integer division to index the y block.
    coef[0] = x0
    coef[1] = scale * np.cos(phi)
    coef[order + 1] = -scale * np.sin(phi)
    coef[n // 2] = y0
    coef[n // 2 + 1] = scale * np.sin(phi)
    coef[n // 2 + order + 1] = scale * np.cos(phi)

    return list(coef)


def _pullorder(coef, order=1):
    """
    Extract the coefficients of all terms up to the given total order
    (x block first, then y block), e.g. to optimize only the low-order
    part of the transformation.
    """
    coeforder = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)
    coef_short = []

    i = 0
    for ix in range(coeforder + 1):
        for iy in range(coeforder - ix + 1):
            if ix + iy <= order:
                coef_short += [coef[i]]
            i += 1
    for ix in range(coeforder + 1):
        for iy in range(coeforder - ix + 1):
            if ix + iy <= order:
                coef_short += [coef[i]]
            i += 1

    return coef_short


def _insertorder(coefshort, coef):
    """
    Inverse of _pullorder: insert the optimized low-order coefficients
    back into the full coefficient list.
    """
    coeforder = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)
    shortorder = int(np.sqrt(len(coefshort) + 0.25) - 1.5 + 1e-12)

    i = 0
    j = 0
    for ix in range(coeforder + 1):
        for iy in range(coeforder - ix + 1):
            if ix + iy <= shortorder:
                coef[i] = coefshort[j]
                j += 1
            i += 1
    for ix in range(coeforder + 1):
        for iy in range(coeforder - ix + 1):
            if ix + iy <= shortorder:
                coef[i] = coefshort[j]
                j += 1
            i += 1

    return coef
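
# Illustrative note, not part of the pipeline: the coefficient lists built
# by _initcoef pack the x polynomial first and the y polynomial second.
# Within each block the terms are ordered by (ix, iy), with ix the power of
# x and iy the power of y, iterating iy fastest; for order 1 each block
# multiplies the terms [1, y, x].  _pullorder(coef, 1) on an order-2 list
# (12 entries) therefore extracts the 6 entries with ix + iy <= 1, and
# _insertorder writes an optimized short list back into the full one.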


def _transform(x, y, order, coef, highordercoef=None):
    """
    Private function _transform in locate_psflets

    Apply the coefficients given to transform the coordinates using
    a polynomial.

    Parameters
    ----------
    x: ndarray
        Rectilinear grid
    y: ndarray of floats
        Rectilinear grid
    order: int
        Order of the polynomial fit
    coef: list of floats
        List of the coefficients.  Must match the length required by
        order = (order+1)*(order+2)
    highordercoef: list of floats, optional
        Full coefficient list supplying the terms above the order of coef

    Returns
    -------
    _x: ndarray
        Transformed coordinates
    _y: ndarray
        Transformed coordinates
    """
    try:
        if not len(coef) == (order + 1) * (order + 2):
            pass
            # raise ValueError("Number of coefficients incorrect for polynomial order.")
    except:
        raise AttributeError("order must be integer, coef should be a list.")

    try:
        if not order == int(order):
            raise ValueError("Polynomial order must be integer")
        else:
            if order < 1 or order > 5:
                raise ValueError("Polynomial order must be >0, <=5")
    except:
        raise ValueError("Polynomial order must be integer")

    # n**2 + 3*n + 2 = (n + 1.5)**2 - 0.25
    #                = (1/4)*((2*n + 3)**2 - 1) = len(coef)
    order1 = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)

    _x = np.zeros(np.asarray(x).shape)
    _y = np.zeros(np.asarray(y).shape)

    i = 0
    for ix in range(order1 + 1):
        for iy in range(order1 - ix + 1):
            _x += coef[i] * x**ix * y**iy
            i += 1

    for ix in range(order1 + 1):
        for iy in range(order1 - ix + 1):
            _y += coef[i] * x**ix * y**iy
            i += 1

    if highordercoef is None:
        return [_x, _y]
    else:
        # Add the terms above order1, taking them from the full
        # coefficient list highordercoef.
        order2 = int(np.sqrt(len(highordercoef) + 0.25) - 1.5 + 1e-12)
        i = 0
        for ix in range(order2 + 1):
            for iy in range(order2 - ix + 1):
                if ix + iy <= order1:
                    i += 1
                    continue
                _x += highordercoef[i] * x**ix * y**iy
                i += 1
        for ix in range(order2 + 1):
            for iy in range(order2 - ix + 1):
                if ix + iy <= order1:
                    i += 1
                    continue
                _y += highordercoef[i] * x**ix * y**iy
                i += 1

        return [_x, _y]


def _corrval(coef, x, y, filtered, order, trimfrac=0.1, highordercoef=None):
    """
    Private function _corrval in locate_psflets

    Return the negative of the sum of the middle XX% of the PSFlet
    spot fluxes (disregarding those with the most and the least flux
    to limit the impact of outliers).  Analogous to the trimmed mean.

    Parameters
    ----------
    coef: list of floats
        Coefficients for the polynomial transformation
    x: ndarray
        Coordinates of lenslets
    y: ndarray
        Coordinates of lenslets
    filtered: ndarray
        Image convolved with the Gaussian PSFlet template
    order: int
        Order of the polynomial fit
    trimfrac: float
        Fraction of outliers (high & low combined) to trim.
        Default 0.1 (5% trimmed on the high end, 5% on the low end)
    highordercoef: list of floats, optional
        Passed through to _transform

    Returns
    -------
    score: float
        Negative sum of PSFlet fluxes, to be minimized
    """
    #################################################################
    # Use np.nan for lenslet coordinates outside the CHARIS FOV,
    # discard these from the calculation before trimming.
    #################################################################
    _x, _y = _transform(x, y, order, coef, highordercoef)
    vals = ndimage.map_coordinates(filtered, [_y, _x], mode='constant',
                                   cval=np.nan, prefilter=False)
    vals_ok = vals[np.where(np.isfinite(vals))]

    iclip = int(vals_ok.shape[0] * trimfrac / 2)
    vals_sorted = np.sort(vals_ok)
    score = -1 * np.sum(vals_sorted[iclip:-iclip])
    return score
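
# Illustrative sketch, not part of the pipeline: a quick check that the
# default first-order block of _initcoef reproduces the lenslet pitch.
# Neighboring lenslets should land about `scale` pixels apart after the
# transformation:
#
#     xg, yg = np.meshgrid(np.arange(-2, 3), np.arange(-2, 3))
#     xp, yp = _transform(xg, yg, 2, _initcoef(2))
#     np.hypot(xp[0, 1] - xp[0, 0], yp[0, 1] - yp[0, 0])  # approx. 15.02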


def locatePSFlets(inImage, polyorder=2, sig=0.7, coef=None, trimfrac=0.1,
                  phi=np.arctan2(1.926, -1), scale=15.02, fitorder=None):
    """
    function locatePSFlets takes an Image class, assumed to be a
    monochromatic grid of spots with read noise and shot noise, and
    returns the estimated positions of the spot centroids.  This is
    designed to constrain the domain of the PSF-let fitting later in
    the pipeline.

    Parameters
    ----------
    inImage: Image class
        Assumed to be a monochromatic grid of spots
    polyorder: float
        Order of the polynomial coordinate transformation.  Default 2.
    sig: float
        Standard deviation of the convolving Gaussian used for
        estimating the grid of centroids.  Should be close to the true
        value for the PSF-let spots.  Default 0.7.
    coef: list
        Initial guess of the coefficients of the polynomial coordinate
        transformation
    trimfrac: float
        Fraction of lenslet outliers (high & low combined) to trim in
        the minimization.  Default 0.1 (5% trimmed on the high end, 5%
        on the low end)
    phi: float
        Pitch angle of the lenslet grid.  Default atan2(1.926, -1)
    scale: float
        Linear separation in pixels of the PSFlets.  Default 15.02.
    fitorder: int
        If given together with an initial coef, only the coefficients
        up to this total order are refit on the full image.

    Returns
    -------
    x: 2D ndarray
        Estimated spot centroids in x.
    y: 2D ndarray
        Estimated spot centroids in y.
    good: 2D boolean ndarray
        True for lenslets with spots inside the detector footprint
    coef: list of floats
        List of best-fit polynomial coefficients

    Notes
    -----
    The coefficients, if not supplied, are initially set to the known
    pitch angle and scale.  A loop then does a quick check to find
    reasonable offsets in x and y.  With all of the first-order
    polynomial coefficients set, the optimizer refines these and the
    higher-order coefficients.  This routine seems to be relatively
    robust down to per-lenslet signal-to-noise ratios of order unity
    (or even a little less).  Important note: as of now (09/2015), the
    number of lenslets to grid is hard-coded as 1/10 the dimensionality
    of the final array.  This is sufficient to cover the detector for
    the fiducial lenslet spacing.
    """
    #############################################################
    # Convolve with a Gaussian, centroid the filtered image.
    #############################################################
    x = np.arange(-1 * int(3 * sig + 1), int(3 * sig + 1) + 1)
    x, y = np.meshgrid(x, x)
    gaussian = np.exp(-(x**2 + y**2) / (2 * sig**2))

    if inImage.ivar is None:
        unfiltered = signal.convolve2d(inImage.data, gaussian, mode='same')
    else:
        unfiltered = signal.convolve2d(inImage.data * inImage.ivar, gaussian, mode='same')
        unfiltered /= signal.convolve2d(inImage.ivar, gaussian, mode='same') + 1e-10
    filtered = ndimage.spline_filter(unfiltered)

    #############################################################
    # x, y: Grid of lenslet IDs, Lenslet (0, 0) is the center.
    #############################################################
    gridfrac = 20
    ydim, xdim = inImage.data.shape
    x = np.arange(-(ydim // gridfrac), ydim // gridfrac + 1)
    x, y = np.meshgrid(x, x)

    #############################################################
    # Set up polynomial coefficients, convert from lenslet
    # coordinates to coordinates on the detector array.
    # Then optimize the coefficients.
    # We want to start with a decent guess, so we use a grid of
    # offsets.  Seems to be robust down to SNR/PSFlet ~ 1
    # Create slice indices for subimages to perform the initial
    # fits on.  The new dimensionality in both x and y is 2*subsize
    #############################################################

    #############################################################
    # If we have coefficients from last time, we assume that we
    # are now at a slightly higher wavelength, so try out offsets
    # that are slightly to the right to get a good initial guess.
    #############################################################
    if coef is None:
        ix_arr = np.arange(0, 14, 0.5)
        iy_arr = np.arange(0, 25, 0.5)
        log.info("Initializing PSFlet location transformation coefficients")
        init = True
    else:
        ix_arr = np.arange(-3.0, 3.05, 0.2)
        iy_arr = np.arange(-3.0, 3.05, 0.2)
        coef_save = list(coef[:])
        log.info("Initializing transformation coefficients with previous values")
        init = False

    bestval = 0
    subshape = xdim * 3 // 8
    _s = x.shape[0] * 3 // 8
    subfiltered = ndimage.spline_filter(unfiltered[subshape:-subshape, subshape:-subshape])

    for ix in ix_arr:
        for iy in iy_arr:
            if init:
                coef = _initcoef(polyorder, x0=ix + xdim / 2. - subshape,
                                 y0=iy + ydim / 2. - subshape, scale=scale, phi=phi)
            else:
                coef = copy.deepcopy(coef_save)
                coef[0] += ix - subshape
                coef[(polyorder + 1) * (polyorder + 2) // 2] += iy - subshape
            newval = _corrval(coef, x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s],
                              subfiltered, polyorder, trimfrac)
            if newval < bestval:
                bestval = newval
                coef_opt = copy.deepcopy(coef)

    if init:
        log.info("Performing initial optimization of PSFlet location transformation coefficients for frame " + inImage.filename)
        res = optimize.minimize(_corrval, coef_opt, args=(
            x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac),
            method='Powell')
        coef_opt = res.x
    else:
        log.info("Performing initial optimization of PSFlet location transformation coefficients for frame " + inImage.filename)
        coef_lin = _pullorder(coef_opt, 1)
        res = optimize.minimize(_corrval, coef_lin, args=(
            x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac, coef_opt),
            method='Powell', options={'xtol': 1e-6, 'ftol': 1e-6})
        coef_lin = res.x
        coef_opt = _insertorder(coef_lin, coef_opt)

    coef_opt[0] += subshape
    coef_opt[(polyorder + 1) * (polyorder + 2) // 2] += subshape

    #############################################################
    # Now refine the fit on the full image.
    #############################################################
    log.info("Performing final optimization of PSFlet location transformation coefficients for frame " + inImage.filename)
    if not init and fitorder is not None:
        coef_lin = _pullorder(coef_opt, fitorder)
        res = optimize.minimize(_corrval, coef_lin,
                                args=(x, y, filtered, polyorder, trimfrac, coef_opt),
                                method='Powell', options={'xtol': 1e-5, 'ftol': 1e-5})
        coef_lin = res.x
        coef_opt = _insertorder(coef_lin, coef_opt)
    else:
        res = optimize.minimize(_corrval, coef_opt,
                                args=(x, y, filtered, polyorder, trimfrac),
                                method='Powell', options={'xtol': 1e-5, 'ftol': 1e-5})
        coef_opt = res.x

    if not res.success:
        log.info("Optimizing PSFlet location transformation coefficients may have failed for frame " + inImage.filename)

    _x, _y = _transform(x, y, polyorder, coef_opt)

    #############################################################
    # Boolean: do the lenslet PSFlets lie within the detector?
    #############################################################
    good = (_x > 5) * (_x < xdim - 5) * (_y > 5) * (_y < ydim - 5)

    return [_x, _y, good, coef_opt]
# Then optimize the coefficients.", "bestval = 0 subshape = xdim * 3 // 8 _s = x.shape[0]", "order1 = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12) _x = np.zeros(np.asarray(x).shape) _y", "iy in range(order1 - ix + 1): if ix + iy <= order1:", "# If we have coefficients from last time, we assume that we #", "interpolate, ndimage, optimize, signal try: from charis.image import Image except: from image import", "continue _x += coef[i] * x**ix * y**iy i += 1 for ix", "the low end) highordercoef: boolean Returns ------- score: float Negative sum of PSFlet", "1) res = optimize.minimize(_corrval, coef_lin, args=( x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac,", "from last time, we assume that we # are now at a slightly", "############################################################# good = (_x > 5) * (_x < xdim - 5) *", "with the number of valid wavelengths for a given lenslet (some wavelengths fall", "in locate_psflets Return the negative of the sum of the middle XX% of", "now at a slightly higher wavelength, so try out offsets # that are", "tck_x = interpolate.splrep(interp_lam, pix_x, k=1, s=0) nlam[ix, iy] = y2 - y1 +", "None: self.geninterparray(lam, allcoef, order=order) coeforder = int(np.sqrt(allcoef.shape[1])) - 1 if not (coeforder +", "to get a good initial guess. ############################################################# log.info(\"Performing final optimization of PSFlet location", "[_x, _y] else: order2 = int(np.sqrt(len(highordercoef) + 0.25) - 1.5 + 1e-12) i", "coef_opt = _insertorder(coef_lin, coef_opt) else: res = optimize.minimize(_corrval, coef_opt, args=(x, y, filtered, polyorder,", "lie within the detector? ############################################################# good = (_x > 5) * (_x <", "order == int(order): raise ValueError(\"Polynomial order must be integer\") else: if order <", "iy in iy_arr: if init: coef = _initcoef(polyorder, x0=ix + xdim / 2.", "= np.zeros(np.asarray(x).shape) _y = np.zeros(np.asarray(y).shape) i = 0 for ix in range(order1 +", "1, int(np.amax(pix_y))] tck_x = interpolate.splrep(interp_lam, pix_x, k=1, s=0) nlam[ix, iy] = y2 -", "the PSF-let spots. Default 0.7. coef: list initial guess of the coefficients of", "floats Polynomial coefficients of wavelength solution order: int Order of polynomial wavelength solution", "outdir + '/PSFloc.fits') out = fits.HDUList(fits.PrimaryHDU(self.xindx)) out.append(fits.PrimaryHDU(self.yindx)) out.append(fits.PrimaryHDU(self.lam_indx)) out.append(fits.PrimaryHDU(self.nlam.astype(int))) try: out.writeto(outfile, overwrite=True) except:", "to the trimmed mean. Parameters ---------- coef: list of floats coefficients for polynomial", "iclip = int(vals_ok.shape[0] * trimfrac / 2) vals_sorted = np.sort(vals_ok) score = -1", "coef_lin, args=( x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac, coef_opt), method='Powell', options={'xtol': 1e-6,", "range(x.shape[-1]): if np.all(y[:, :, nlam_max] == 0): break self.xindx = x[:, :, :nlam_max]", "_corrval(coef, x, y, filtered, order, trimfrac=0.1, highordercoef=None): \"\"\" Private function _corrval in locate_psflets", "x0=ix + xdim / 2. - subshape, y0=iy + ydim / 2. -", "floats coefficients for polynomial transformation x: ndarray coordinates of lenslets y: ndarray coordinates", "on the detector interp_y: float Y coordinate on the detector ''' if len(allcoef.shape)", "in nm Notes ----- This functions fills in most of the fields of", "rotation matrix plus zeros. 
Parameters ---------- order: int The polynomial order of the", "range(order2 + 1): for iy in range(order1 - ix + 1): if ix", "* np.log(lam)**k return coef def return_locations(self, lam, allcoef, xindx, yindx, order=3): ''' Calculates", "ix + iy <= order1: continue _x += coef[i] * x**ix * y**iy", "i += 1 return coef_short def _insertorder(coefshort, coef): coeforder = int(np.sqrt(len(coef) + 0.25)", "coordinates of lenslet located at `xindx`, `yindx` for desired wavelength `lam` Parameters ----------", "[1000])) y = np.zeros(x.shape) nlam = np.zeros(xindx.shape, np.int) lam_out = np.zeros(y.shape) good =", "= np.amax(lam) * 1.03 interporder = order if self.interp_arr is None: self.geninterparray(lam, allcoef,", "+ 1 y[ix, iy, :nlam[ix, iy]] = np.arange(y1, y2 + 1) lam_out[ix, iy,", "in nm allcoef: list of lists floats Polynomial coefficients of wavelength solution xindx:", "for k in range(self.order + 1): coef += self.interp_arr[k] * np.log(lam)**k interp_x, interp_y", "+ 1.5)**2 - 0.25 # = (1/4)*((2*n + 3)**2 - 1) = len(coef)", "Rectilinear grid order: int Order of the polynomial fit coef: list of floats", "used to smooth/interpolate the wavelength solution, and ultimately to compute its inverse. Parameters", "List of the coefficients. Must match the length required by order = (order+1)*(order+2)", "trimmed mean. Parameters ---------- coef: list of floats coefficients for polynomial transformation x:", "in which the file resides ''' if infile is None: infile = re.sub('//',", "---------- outdir: String Directory in which to put the file. The file is", "Directory in which to put the file. The file is name PSFloc.fits and", "(_x > 5) * (_x < xdim - 5) * (_y > 5)", "class PSFLets: \"\"\" Helper class to deal with the PSFLets on the detector.", "xindx = np.arange(-100, 101) xindx, yindx = np.meshgrid(xindx, xindx) n_spline = 100 interp_x", "k=1, s=0) y1, y2 = [int(np.amin(pix_y)) + 1, int(np.amax(pix_y))] tck_x = interpolate.splrep(interp_lam, pix_x,", "central pixel. Default 0 y0: float y offset to apply to the central", "range(order1 - ix + 1): _y += coef[i] * x**ix * y**iy i", "phi: float The pitch angle of the lenslets. Default atan(1.926) x0: float x", "polynomial coefficients, convert from lenslet # coordinates to coordinates on the detector array.", "init = True else: ix_arr = np.arange(-3.0, 3.05, 0.2) iy_arr = np.arange(-3.0, 3.05,", "from charis.image import Image except: from image import Image log = logging.getLogger('main') class", "the rotation matrix given by scale and phi. 
\"\"\" try: if not order", "x,y detector location of a given lenslet for a given polynomial fit Parameters", "= False bestval = 0 subshape = xdim * 3 // 8 _s", "1): for iy in range(order1 - ix + 1): _y += coef[i] *", "coeforder, coef) dx += [_dx] dy += [_dy] R = np.sqrt(np.asarray(dy)**2 + np.asarray(dx)**2)", "+ 1), int(3 * sig + 1) + 1) x, y = np.meshgrid(x,", "yindx: int Y index of lenslet in lenslet array Returns ------- interp_x: float", "iy <= order1: continue _x += coef[i] * x**ix * y**iy i +=", "def genpixsol(self, lam, allcoef, order=3, lam1=None, lam2=None): \"\"\" Calculates the wavelength at the", "a given lenslet (some wavelengths fall outside of the detector area) ''' if", "+ 1): coef += self.interp_arr[k] * np.log(lam)**k return coef def return_locations(self, lam, allcoef,", "= np.log(lam)**i for i in range(self.interp_arr.shape[1]): coef = np.linalg.lstsq(xarr, allcoef[:, i])[0] self.interp_arr[:, i]", "to be robust down to SNR/PSFlet ~ 1 # Create slice indices for", "fit trimfrac: float fraction of outliers (high & low combined) to trim Default", "coef[0] += ix - subshape coef[(polyorder + 1) * (polyorder + 2) /", "------- coef: list of floats A list of length (order+1)*(order+2) to be optimized.", "lifting during the wavelength calibration step. \"\"\" def __init__(self, load=False, infile=None, infiledir='.'): '''", "of coefficients including a rotation matrix plus zeros. Parameters ---------- order: int The", "(high & low combined) to trim Default 0.1 (5% trimmed on the high", "low combined) to trim Default 0.1 (5% trimmed on the high end, 5%", "* np.sin(phi) coef[n / 2 + order + 1] = scale * np.cos(phi)", "- 1.5 + 1e-12) _x = np.zeros(np.asarray(x).shape) _y = np.zeros(np.asarray(y).shape) i = 0", "def _corrval(coef, x, y, filtered, order, trimfrac=0.1, highordercoef=None): \"\"\" Private function _corrval in", "self.interp_arr[k] * np.log(lam)**k interp_x, interp_y = _transform(xindx, yindx, coeforder, coef) return interp_x, interp_y", "inImage.data.shape x = np.arange(-(ydim // gridfrac), ydim // gridfrac + 1) x, y", "wavelength solution Returns ------- interp_x: float X coordinate on the detector interp_y: float", "raise ValueError(\"Number of coefficients incorrect for polynomial order.\") except: raise AttributeError(\"order must be", "interp_y[i] = _transform(xindx, yindx, coeforder, coef) x = np.zeros(tuple(list(xindx.shape) + [1000])) y =", "log.info(\"Initializing transformation coefficients with previous values\") init = False bestval = 0 subshape", "ix - subshape coef[(polyorder + 1) * (polyorder + 2) / 2] +=", "in y. 
good:2D boolean ndarray True for lenslets with spots inside the detector", "x**ix * y**iy i += 1 return [_x, _y] def _corrval(coef, x, y,", "coef) return interp_x, interp_y def return_res(self, lam, allcoef, xindx, yindx, order=3, lam1=None, lam2=None):", "float fraction of lenslet outliers (high & low combined) to trim in the", "= int(np.sqrt(allcoef.shape[0])) - 1 interp_x, interp_y = _transform(xindx, yindx, coeforder, allcoef) return interp_x,", "1 y[ix, iy, :nlam[ix, iy]] = np.arange(y1, y2 + 1) lam_out[ix, iy, :nlam[ix,", "to trim Default 0.1 (5% trimmed on the high end, 5% on the", "y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac), method='Powell') coef_opt = res.x else: log.info(\"Performing initial optimization", "in range(order1 + 1): for iy in range(order1 - ix + 1): _y", "y2 = [int(np.amin(pix_y)) + 1, int(np.amax(pix_y))] tck_x = interpolate.splrep(interp_lam, pix_x, k=1, s=0) nlam[ix,", "1.5 + 1e-12) i = 0 for ix in range(order2 + 1): for", "dy = [] dx = [] for i in range(n_spline): coef = np.zeros((coeforder", "x: ndarray coordinates of lenslets y: ndarray coordinates of lenslets filtered: ndarray image", "float Effective spectral resolution ''' if lam1 is None: lam1 = np.amin(lam) /", "* (coeforder + 2)) for k in range(interporder + 1): coef += self.interp_arr[k]", "= re.sub('//', '/', infiledir + '/PSFloc.fits') hdulist = fits.open(infile) try: self.xindx = hdulist[0].data", "interpolate.splev(lam_out[ix, iy, :nlam[ix, iy]], tck_x) for nlam_max in range(x.shape[-1]): if np.all(y[:, :, nlam_max]", "// gridfrac), ydim // gridfrac + 1) x, y = np.meshgrid(x, x) #############################################################", "self.interp_arr[k] * np.log(interp_lam[i])**(k - 1) _dx, _dy = _transform(xindx, yindx, coeforder, coef) dx", "grid of spots polyorder: float order of the polynomial coordinate transformation. Default 2.", "lam, alllam=None, allcoef=None, order=3): if self.interp_arr is None: if alllam is None or", "= np.zeros((coeforder + 1) * (coeforder + 2)) for k in range(interporder +", "1e-5, 'ftol': 1e-5}) coef_lin = res.x coef_opt = _insertorder(coef_lin, coef_opt) else: res =", "# Read in wavelengths of spots, coefficients of wavelength # solution. Obtain extrapolated", "------- x: 2D ndarray Estimated spot centroids in x. y: 2D ndarray Estimated", "1) = len(coef) order1 = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12) _x", "to 4% below and 3% above limits of the coefficient file by #", "coeforder, coef) return interp_x, interp_y def return_res(self, lam, allcoef, xindx, yindx, order=3, lam1=None,", "import logging import os import re import numpy as np from astropy.io import", "1): for iy in range(coeforder - ix + 1): if ix + iy", "yindx, coeforder, coef) dx += [_dx] dy += [_dy] R = np.sqrt(np.asarray(dy)**2 +", "must be integer\") # n**2 + 3*n + 2 = (n + 1.5)**2", "= hdulist[0].data self.yindx = hdulist[1].data self.lam_indx = hdulist[2].data self.nlam = hdulist[3].data.astype(int) except: raise", "the high end, 5% on the low end) Returns ------- x: 2D ndarray", "coef_opt = copy.deepcopy(coef) if init: log.info(\"Performing initial optimization of PSFlet location transformation coefficients", "3, up to terms like x**3 and x**2*y, but not x**3*y). It is", "the polynomial fit coef: list of floats List of the coefficients. 
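# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a typical query
# sequence once per-wavelength fits exist.  The names `lams` and
# `allcoef` are hypothetical stand-ins for the calibration wavelengths
# (nm) and the stacked per-wavelength coefficients, an array of shape
# (len(lams), (order + 1)*(order + 2)).  geninterparray smooths each
# coefficient as a polynomial in log(lam); return_locations evaluates
# the smoothed solution for one lenslet at one wavelength; genpixsol
# then tabulates per-pixel wavelengths for every microspectrum.
#
#     p = PSFLets()
#     p.geninterparray(lams, allcoef, order=3)
#     x, y = p.return_locations(1330., allcoef, 10, -20)  # lenslet (10, -20)
#     p.genpixsol(lams, allcoef)
#     p.savepixsol(outdir="calibrations/")
# ----------------------------------------------------------------------
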
def _initcoef(order, scale=15.02, phi=np.arctan2(1.926, -1), x0=0, y0=0):
    """
    Private function _initcoef in locate_psflets

    Creates a set of coefficients including a rotation matrix plus zeros.

    Parameters
    ----------
    order: int
        The polynomial order of the grid distortion
    scale: float
        The linear separation in pixels of the PSFlets.  Default 15.02.
    phi: float
        The pitch angle of the lenslets.  Default np.arctan2(1.926, -1)
    x0: float
        x offset to apply to the central pixel.  Default 0
    y0: float
        y offset to apply to the central pixel.  Default 0

    Returns
    -------
    coef: list of floats
        A list of length (order+1)*(order+2) to be optimized.

    Notes
    -----
    The list of coefficients has space for a polynomial fit of the
    input order (i.e., for order 3, up to terms like x**3 and x**2*y,
    but not x**3*y).  It is all zeros in the output apart from the
    rotation matrix given by scale and phi.
    """

    try:
        if not order == int(order):
            raise ValueError("Polynomial order must be integer")
        else:
            if order < 1 or order > 5:
                raise ValueError("Polynomial order must be >0, <=5")
    except:
        raise ValueError("Polynomial order must be integer")

    n = (order + 1) * (order + 2)
    coef = np.zeros(n)

    coef[0] = x0
    coef[1] = scale * np.cos(phi)
    coef[order + 1] = -scale * np.sin(phi)
    coef[n // 2] = y0
    coef[n // 2 + 1] = scale * np.sin(phi)
    coef[n // 2 + order + 1] = scale * np.cos(phi)

    return list(coef)


def _pullorder(coef, order=1):
    # Extract the coefficients up to `order` from the full list,
    # first for the x mapping, then for the y mapping.
    coeforder = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)

    coef_short = []
    i = 0
    for ix in range(coeforder + 1):
        for iy in range(coeforder - ix + 1):
            if ix + iy <= order:
                coef_short += [coef[i]]
            i += 1
    for ix in range(coeforder + 1):
        for iy in range(coeforder - ix + 1):
            if ix + iy <= order:
                coef_short += [coef[i]]
            i += 1

    return coef_short


def _insertorder(coefshort, coef):
    # Inverse of _pullorder: write the low-order coefficients in
    # coefshort back into their slots in the full list coef.
    coeforder = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)
    shortorder = int(np.sqrt(len(coefshort) + 0.25) - 1.5 + 1e-12)

    i = 0
    j = 0
    for ix in range(coeforder + 1):
        for iy in range(coeforder - ix + 1):
            if ix + iy <= shortorder:
                coef[i] = coefshort[j]
                j += 1
            i += 1
    for ix in range(coeforder + 1):
        for iy in range(coeforder - ix + 1):
            if ix + iy <= shortorder:
                coef[i] = coefshort[j]
                j += 1
            i += 1

    return coef


def _transform(x, y, order, coef, highordercoef=None):
    """
    Private function _transform in locate_psflets

    Applies the coefficients given to transform the coordinates using
    a polynomial.

    Parameters
    ----------
    x: ndarray
        Rectilinear grid
    y: ndarray of floats
        Rectilinear grid
    order: int
        Order of the polynomial fit
    coef: list of floats
        List of the coefficients.  Must match the length required by
        order = (order+1)*(order+2)
    highordercoef: list of floats, optional
        Coefficients of a higher-order fit to apply on top of coef

    Returns
    -------
    _x: ndarray
        Transformed coordinates
    _y: ndarray
        Transformed coordinates
    """

    try:
        if not len(coef) == (order + 1) * (order + 2):
            pass
            # raise ValueError("Number of coefficients incorrect for polynomial order.")
    except:
        raise AttributeError("order must be integer, coef should be a list.")

    try:
        if not order == int(order):
            raise ValueError("Polynomial order must be integer")
        else:
            if order < 1 or order > 5:
                raise ValueError("Polynomial order must be >0, <=5")
    except:
        raise ValueError("Polynomial order must be integer")

    # n**2 + 3*n + 2 = (n + 1.5)**2 - 0.25
    #                = (1/4)*((2*n + 3)**2 - 1) = len(coef)
    order1 = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)

    _x = np.zeros(np.asarray(x).shape)
    _y = np.zeros(np.asarray(y).shape)

    i = 0
    for ix in range(order1 + 1):
        for iy in range(order1 - ix + 1):
            _x += coef[i] * x**ix * y**iy
            i += 1
    for ix in range(order1 + 1):
        for iy in range(order1 - ix + 1):
            _y += coef[i] * x**ix * y**iy
            i += 1

    if highordercoef is None:
        return [_x, _y]
    else:
        order2 = int(np.sqrt(len(highordercoef) + 0.25) - 1.5 + 1e-12)
        i = 0
        for ix in range(order2 + 1):
            for iy in range(order1 - ix + 1):
                if ix + iy <= order1:
                    continue
                _x += coef[i] * x**ix * y**iy
                i += 1
        for ix in range(order2 + 1):
            for iy in range(order1 - ix + 1):
                if ix + iy <= order1:
                    continue
                _y += coef[i] * x**ix * y**iy
                i += 1

        return [_x, _y]


def _corrval(coef, x, y, filtered, order, trimfrac=0.1, highordercoef=None):
    """
    Private function _corrval in locate_psflets

    Return the negative of the sum of the middle XX% of the PSFlet
    spot fluxes (disregarding those with the most and the least flux
    to limit the impact of outliers).  Analogous to the trimmed mean.

    Parameters
    ----------
    coef: list of floats
        coefficients for polynomial transformation
    x: ndarray
        coordinates of lenslets
    y: ndarray
        coordinates of lenslets
    filtered: ndarray
        image convolved with gaussian PSFlet
    order: int
        order of the polynomial fit
    trimfrac: float
        fraction of outliers (high & low combined) to trim
        Default 0.1 (5% trimmed on the high end, 5% on the low end)
    highordercoef: list of floats, optional
        higher-order coefficients held fixed during the fit

    Returns
    -------
    score: float
        Negative sum of PSFlet fluxes, to be minimized
    """

    #################################################################
    # Use np.nan for lenslet coordinates that land off the detector:
    # map_coordinates flags them via cval, and they are discarded
    # before the trimmed sum is computed.
    #################################################################

    _x, _y = _transform(x, y, order, coef, highordercoef)
    vals = ndimage.map_coordinates(filtered, [_y, _x], mode='constant',
                                   cval=np.nan, prefilter=False)
    vals_ok = vals[np.where(np.isfinite(vals))]

    iclip = int(vals_ok.shape[0] * trimfrac / 2)
    vals_sorted = np.sort(vals_ok)
    score = -1 * np.sum(vals_sorted[iclip:-iclip])
    return score

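# ----------------------------------------------------------------------
# Coefficient layout sketch (illustrative, not part of the original
# module).  For polynomial order 2 the coefficient list has
# (2+1)*(2+2) = 12 entries; the first half maps lenslet (x, y) to
# detector x, the second half to detector y, each ordered as
# 1, y, y**2, x, x*y, x**2 by the loops in _transform.  _initcoef fills
# only the constant and linear slots, i.e. a rotation by phi and a
# scaling by the lenslet pitch:
#
#     coef = _initcoef(2, x0=1024., y0=1024.)
#     _x, _y = _transform(0, 0, 2, coef)   # lenslet (0, 0) -> (1024., 1024.)
#
# _pullorder/_insertorder extract and restore the constant+linear
# subset of such a list, so the optimizer can refine the low-order
# terms while _corrval holds the remaining terms via `highordercoef`.
# ----------------------------------------------------------------------
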
def locatePSFlets(inImage, polyorder=2, sig=0.7, coef=None, trimfrac=0.1,
                  phi=np.arctan2(1.926, -1), scale=15.02, fitorder=None):
    """
    function locatePSFlets takes an Image class, assumed to be a
    monochromatic grid of spots with read noise and shot noise, and
    returns the estimated positions of the spot centroids.  This is
    designed to constrain the domain of the PSF-let fitting later in
    the pipeline.

    Parameters
    ----------
    inImage: Image class
        Assumed to be a monochromatic grid of spots
    polyorder: int
        Order of the polynomial coordinate transformation.  Default 2.
    sig: float
        Standard deviation of the convolving Gaussian used for
        estimating the grid of centroids.  Should be close to the true
        value for the PSF-let spots.  Default 0.7.
    coef: list
        Initial guess of the coefficients of the polynomial coordinate
        transformation
    trimfrac: float
        Fraction of lenslet outliers (high & low combined) to trim in
        the minimization.  Default 0.1 (5% trimmed on the high end,
        5% on the low end)
    phi: float
        Pitch angle of the lenslet array.  Default np.arctan2(1.926, -1)
    scale: float
        Linear separation in pixels of the PSFlets.  Default 15.02.
    fitorder: int, optional
        Polynomial order of the coefficient subset to refine when
        starting from a previous solution.

    Returns
    -------
    x: 2D ndarray
        Estimated spot centroids in x.
    y: 2D ndarray
        Estimated spot centroids in y.
    good: 2D boolean ndarray
        True for lenslets with spots inside the detector footprint
    coef: list of floats
        List of best-fit polynomial coefficients

    Notes
    -----
    The coefficients, if not supplied, are initially set to the known
    pitch angle and scale.  A loop then does a quick check to find
    reasonable offsets in x and y.  With all of the first-order
    polynomial coefficients set, the optimizer refines these and the
    higher-order coefficients.  This routine seems to be relatively
    robust down to per-lenslet signal-to-noise ratios of order unity
    (or even a little less).  Important note: as of now (09/2015), the
    number of lenslets to grid is hard-coded as 1/10 the
    dimensionality of the final array.  This is sufficient to cover
    the detector for the fiducial lenslet spacing.
    """

    #############################################################
    # Convolve with a Gaussian, centroid the filtered image.
    #############################################################

    x = np.arange(-1 * int(3 * sig + 1), int(3 * sig + 1) + 1)
    x, y = np.meshgrid(x, x)

    gaussian = np.exp(-(x**2 + y**2) / (2 * sig**2))

    if inImage.ivar is None:
        unfiltered = signal.convolve2d(inImage.data, gaussian, mode='same')
    else:
        unfiltered = signal.convolve2d(inImage.data * inImage.ivar, gaussian, mode='same')
        unfiltered /= signal.convolve2d(inImage.ivar, gaussian, mode='same') + 1e-10

    filtered = ndimage.interpolation.spline_filter(unfiltered)

    #############################################################
    # x, y: Grid of lenslet IDs, Lenslet (0, 0) is the center.
    #############################################################

    gridfrac = 20
    ydim, xdim = inImage.data.shape
    x = np.arange(-(ydim // gridfrac), ydim // gridfrac + 1)
    x, y = np.meshgrid(x, x)

    #############################################################
    # Set up polynomial coefficients, convert from lenslet
    # coordinates to coordinates on the detector array.
    # Then optimize the coefficients.
    # We want to start with a decent guess, so we use a grid of
    # offsets.  Seems to be robust down to SNR/PSFlet ~ 1
    # Create slice indices for subimages to perform the initial
    # fits on.  The new dimensionality in both x and y is
    # reduced by 2*subshape.
    #############################################################

    if coef is None:
        ix_arr = np.arange(0, 14, 0.5)
        iy_arr = np.arange(0, 25, 0.5)
        log.info("Initializing PSFlet location transformation coefficients")
        init = True
    # If we have coefficients from last time, we assume that we
    # are now at a slightly higher wavelength, so try out offsets
    # that are small relative to the previous solution.
    else:
        ix_arr = np.arange(-3.0, 3.05, 0.2)
        iy_arr = np.arange(-3.0, 3.05, 0.2)
        coef_save = list(coef[:])
        log.info("Initializing transformation coefficients with previous values")
        init = False

    bestval = 0
    subshape = xdim * 3 // 8
    _s = x.shape[0] * 3 // 8
    subfiltered = ndimage.interpolation.spline_filter(
        unfiltered[subshape:-subshape, subshape:-subshape])

    for ix in ix_arr:
        for iy in iy_arr:
            if init:
                coef = _initcoef(polyorder, x0=ix + xdim / 2. - subshape,
                                 y0=iy + ydim / 2. - subshape,
                                 scale=scale, phi=phi)
            else:
                coef = copy.deepcopy(coef_save)
                coef[0] += ix - subshape
                coef[(polyorder + 1) * (polyorder + 2) // 2] += iy - subshape

            newval = _corrval(coef, x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s],
                              subfiltered, polyorder, trimfrac)
            if newval < bestval:
                bestval = newval
                coef_opt = copy.deepcopy(coef)

    if init:
        log.info("Performing initial optimization of PSFlet location "
                 "transformation coefficients for frame " + inImage.filename)
        res = optimize.minimize(_corrval, coef_opt, args=(
            x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder,
            trimfrac), method='Powell')
        coef_opt = res.x
    else:
        log.info("Performing initial optimization of PSFlet location "
                 "transformation coefficients for frame " + inImage.filename)
        coef_lin = _pullorder(coef_opt, 1)
        res = optimize.minimize(_corrval, coef_lin, args=(
            x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder,
            trimfrac, coef_opt), method='Powell',
            options={'xtol': 1e-6, 'ftol': 1e-6})
        coef_lin = res.x
        coef_opt = _insertorder(coef_lin, coef_opt)

    # Undo the subimage origin shift before fitting the full frame.
    coef_opt[0] += subshape
    coef_opt[(polyorder + 1) * (polyorder + 2) // 2] += subshape

    #############################################################
    # Refine the coefficients on the full image, using the
    # subimage solution to get a good initial guess.
    #############################################################

    log.info("Performing final optimization of PSFlet location "
             "transformation coefficients for frame " + inImage.filename)

    if not init and fitorder is not None:
        coef_lin = _pullorder(coef_opt, fitorder)
        res = optimize.minimize(_corrval, coef_lin,
                                args=(x, y, filtered, polyorder, trimfrac, coef_opt),
                                method='Powell',
                                options={'xtol': 1e-5, 'ftol': 1e-5})
        coef_lin = res.x
        coef_opt = _insertorder(coef_lin, coef_opt)
    else:
        res = optimize.minimize(_corrval, coef_opt,
                                args=(x, y, filtered, polyorder, trimfrac),
                                method='Powell',
                                options={'xtol': 1e-5, 'ftol': 1e-5})
        coef_opt = res.x

    if not res.success:
        log.info("Optimizing PSFlet location transformation coefficients "
                 "may have failed for frame " + inImage.filename)

    _x, _y = _transform(x, y, polyorder, coef_opt)

    #############################################################
    # Boolean: do the lenslet PSFlets lie within the detector?
    #############################################################

    good = (_x > 5) * (_x < xdim - 5) * (_y > 5) * (_y < ydim - 5)

    return [_x, _y, good, coef_opt]
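

if __name__ == "__main__":
    # Minimal end-to-end sketch (illustrative only, not part of the
    # original module).  The file name below is hypothetical, and we
    # assume the imported Image class accepts a `filename` keyword;
    # locatePSFlets itself only relies on an Image-like object exposing
    # .data, .ivar, and .filename.
    inImage = Image(filename='monochrome_1330nm.fits')  # hypothetical file

    # Fit the lenslet-grid -> detector transformation from scratch.
    x, y, good, coef = locatePSFlets(inImage, polyorder=2, sig=0.7)
    log.info("Found %d lenslets on the detector" % np.sum(good))

    # The returned coef can seed the fit at the next calibration
    # wavelength, where only small offsets around it are searched:
    # x2, y2, good2, coef2 = locatePSFlets(nextImage, coef=coef)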